Merge "Merge android-4.4@610af85 (v4.4.85) into msm-4.4"

Authored by Linux Build Service Account on 2017-09-12 14:41:59 -07:00; committed by Gerrit - the friendly Code Review server
commit 14f6bfeeeb
111 changed files with 849 additions and 382 deletions


@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 82
SUBLEVEL = 85
EXTRAVERSION =
NAME = Blurry Fish Butt


@ -88,7 +88,9 @@ extern int ioc_exists;
#define ARC_REG_SLC_FLUSH 0x904
#define ARC_REG_SLC_INVALIDATE 0x905
#define ARC_REG_SLC_RGN_START 0x914
#define ARC_REG_SLC_RGN_START1 0x915
#define ARC_REG_SLC_RGN_END 0x916
#define ARC_REG_SLC_RGN_END1 0x917
/* Bit val in SLC_CONTROL */
#define SLC_CTRL_IM 0x040


@ -543,6 +543,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
static DEFINE_SPINLOCK(lock);
unsigned long flags;
unsigned int ctrl;
phys_addr_t end;
spin_lock_irqsave(&lock, flags);
@ -572,8 +573,16 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
* END needs to be setup before START (latter triggers the operation)
* END can't be same as START, so add (l2_line_sz - 1) to sz
*/
write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
end = paddr + sz + l2_line_sz - 1;
if (is_pae40_enabled())
write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
if (is_pae40_enabled())
write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
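Note on the hunk above: with PAE40 the region bounds are 40-bit physical
addresses, so the upper bits go into the new START1/END1 registers and the
low 32 bits into the original ones; END is still programmed before START
because the START write triggers the operation. A minimal standalone sketch
of the bound arithmetic (the 64-byte line size is an assumption, not taken
from this diff):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t l2_line_sz = 64;   /* assumed typical SLC line size */
    uint64_t paddr = 0x1ffff0040ULL;  /* above 4GB, exercises PAE40 */
    uint64_t sz = 256;
    /* END must differ from START, hence the (l2_line_sz - 1) pad */
    uint64_t end = paddr + sz + l2_line_sz - 1;

    printf("START1:START = 0x%02llx:0x%08llx\n",
           (unsigned long long)(paddr >> 32),
           (unsigned long long)(paddr & 0xffffffffULL));
    printf("END1:END     = 0x%02llx:0x%08llx\n",
           (unsigned long long)(end >> 32),
           (unsigned long long)(end & 0xffffffffULL));
    return 0;
}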


@ -10,9 +10,17 @@
#ifdef CONFIG_THREAD_INFO_IN_TASK
struct task_struct;
/*
* We don't use read_sysreg() as we want the compiler to cache the value where
* possible.
*/
static __always_inline struct task_struct *get_current(void)
{
return (struct task_struct *)read_sysreg(sp_el0);
unsigned long sp_el0;
asm ("mrs %0, sp_el0" : "=r" (sp_el0));
return (struct task_struct *)sp_el0;
}
#define current get_current()
#else
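The rationale in the comment above: read_sysreg() is a volatile asm, which
forces a fresh MRS on every use of current, while the plain (non-volatile)
asm lets the compiler cache and reuse the value. A sketch of the difference
(arm64-only, illustrative):

/* non-volatile: identical calls may be folded into a single MRS */
static inline unsigned long sp_el0_cacheable(void)
{
    unsigned long v;
    asm ("mrs %0, sp_el0" : "=r" (v));
    return v;
}

/* volatile: every call emits its own MRS */
static inline unsigned long sp_el0_fresh(void)
{
    unsigned long v;
    asm volatile ("mrs %0, sp_el0" : "=r" (v));
    return v;
}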


@ -115,10 +115,10 @@
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* 64-bit, this is above 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE 0x100000000UL
#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
#ifndef __ASSEMBLY__
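For scale: with the common 48-bit arm64 VA configuration (TASK_SIZE_64 =
1 << 48 is an assumption here; other configs differ), the new expression
still lands well above 4GB:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t task_size_64 = 1ULL << 48;   /* assumed 48-bit VA config */
    uint64_t base = 2 * task_size_64 / 3; /* new ELF_ET_DYN_BASE */

    /* prints 0xaaaaaaaaaaaa, far above the 32-bit pointer range */
    printf("ELF_ET_DYN_BASE = %#llx\n", (unsigned long long)base);
    return 0;
}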


@ -28,8 +28,10 @@ DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
* We don't use this_cpu_read(cpu_number) as that has implicit writes to
* preempt_count, and associated (compiler) barriers, that we'd like to avoid
* the expense of. If we're preemptible, the value can be stale at use anyway.
* And we can't use this_cpu_ptr() either, as that winds up recursing back
* here under CONFIG_DEBUG_PREEMPT=y.
*/
#define raw_smp_processor_id() (*this_cpu_ptr(&cpu_number))
#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
struct seq_file;


@ -369,7 +369,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_THREAD_INFO_IN_TASK
init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page);
#else
init_thread_info.ttbr0 = (init_thread_union.thread_info);
init_thread_info.ttbr0 = virt_to_phys(empty_zero_page);
#endif
#endif


@ -117,11 +117,10 @@
.set T1, REG_T1
.endm
#define K_BASE %r8
#define HASH_PTR %r9
#define BLOCKS_CTR %r8
#define BUFFER_PTR %r10
#define BUFFER_PTR2 %r13
#define BUFFER_END %r11
#define PRECALC_BUF %r14
#define WK_BUF %r15
@ -205,14 +204,14 @@
* blended AVX2 and ALU instruction scheduling
* 1 vector iteration per 8 rounds
*/
vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
vmovdqu (i * 2)(BUFFER_PTR), W_TMP
.elseif ((i & 7) == 1)
vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
WY_TMP, WY_TMP
.elseif ((i & 7) == 2)
vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
.elseif ((i & 7) == 4)
vpaddd K_XMM(K_BASE), WY, WY_TMP
vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
.elseif ((i & 7) == 7)
vmovdqu WY_TMP, PRECALC_WK(i&~7)
@ -255,7 +254,7 @@
vpxor WY, WY_TMP, WY_TMP
.elseif ((i & 7) == 7)
vpxor WY_TMP2, WY_TMP, WY
vpaddd K_XMM(K_BASE), WY, WY_TMP
vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
@ -291,7 +290,7 @@
vpsrld $30, WY, WY
vpor WY, WY_TMP, WY
.elseif ((i & 7) == 7)
vpaddd K_XMM(K_BASE), WY, WY_TMP
vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
@ -446,6 +445,16 @@
.endm
/* Add constant only if (%2 >= %3) condition met (uses RTA as temp)
* i.e. %1 += (%2 >= %3) ? %4 : 0
*/
.macro ADD_IF_GE a, b, c, d
mov \a, RTA
add $\d, RTA
cmp $\c, \b
cmovge RTA, \a
.endm
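C equivalent of ADD_IF_GE, as a sketch: the cmovge keeps the hot loop
branch-free, advancing the buffer pointer by d only while whole blocks
remain to be processed.

#include <stdint.h>

/* a += d only if b >= c; mirrors the mov/add/cmp/cmovge sequence */
static inline uint64_t add_if_ge(uint64_t a, uint64_t b,
                                 uint64_t c, uint64_t d)
{
    uint64_t tmp = a + d;     /* mov \a, RTA; add $\d, RTA */
    return b >= c ? tmp : a;  /* cmp $\c, \b; cmovge RTA, \a */
}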
/*
* macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
*/
@ -463,13 +472,16 @@
lea (2*4*80+32)(%rsp), WK_BUF
# Precalc WK for first 2 blocks
PRECALC_OFFSET = 0
ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
.set i, 0
.rept 160
PRECALC i
.set i, i + 1
.endr
PRECALC_OFFSET = 128
/* Go to next block if needed */
ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
xchg WK_BUF, PRECALC_BUF
.align 32
@ -479,8 +491,8 @@ _loop:
* we use K_BASE value as a signal of a last block,
* it is set below by: cmovae BUFFER_PTR, K_BASE
*/
cmp K_BASE, BUFFER_PTR
jne _begin
test BLOCKS_CTR, BLOCKS_CTR
jnz _begin
.align 32
jmp _end
.align 32
@ -512,10 +524,10 @@ _loop0:
.set j, j+2
.endr
add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */
cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */
cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
/* Update Counter */
sub $1, BLOCKS_CTR
/* Move to the next block only if needed */
ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
/*
* rounds
* 60,62,64,66,68
@ -532,8 +544,8 @@ _loop0:
UPDATE_HASH 12(HASH_PTR), D
UPDATE_HASH 16(HASH_PTR), E
cmp K_BASE, BUFFER_PTR /* is current block the last one? */
je _loop
test BLOCKS_CTR, BLOCKS_CTR
jz _loop
mov TB, B
@ -575,10 +587,10 @@ _loop2:
.set j, j+2
.endr
add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */
cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one */
cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
/* update counter */
sub $1, BLOCKS_CTR
/* Move to the next block only if needed */
ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
jmp _loop3
_loop3:
@ -641,19 +653,12 @@ _loop3:
avx2_zeroupper
lea K_XMM_AR(%rip), K_BASE
/* Setup initial values */
mov CTX, HASH_PTR
mov BUF, BUFFER_PTR
lea 64(BUF), BUFFER_PTR2
shl $6, CNT /* mul by 64 */
add BUF, CNT
add $64, CNT
mov CNT, BUFFER_END
cmp BUFFER_END, BUFFER_PTR2
cmovae K_BASE, BUFFER_PTR2
mov BUF, BUFFER_PTR2
mov CNT, BLOCKS_CTR
xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP


@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
static bool avx2_usable(void)
{
if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
&& boot_cpu_has(X86_FEATURE_BMI1)
&& boot_cpu_has(X86_FEATURE_BMI2))
return true;


@ -1190,6 +1190,8 @@ ENTRY(nmi)
* other IST entries.
*/
ASM_CLAC
/* Use %rdx as our temp variable throughout */
pushq %rdx


@ -247,11 +247,11 @@ extern int force_personality32;
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* 64-bit, this is above 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
0x100000000UL)
(TASK_SIZE / 3 * 2))
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,


@ -153,7 +153,7 @@ static void __intel_pmu_lbr_enable(bool pmi)
*/
if (cpuc->lbr_sel)
lbr_select = cpuc->lbr_sel->config;
if (!pmi)
if (!pmi && cpuc->lbr_sel)
wrmsrl(MSR_LBR_SELECT, lbr_select);
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
@ -432,8 +432,10 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
int out = 0;
int num = x86_pmu.lbr_nr;
if (cpuc->lbr_sel->config & LBR_CALL_STACK)
num = tos;
if (cpuc->lbr_sel) {
if (cpuc->lbr_sel->config & LBR_CALL_STACK)
num = tos;
}
for (i = 0; i < num; i++) {
unsigned long lbr_idx = (tos - i) & mask;


@ -1067,6 +1067,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
if (list_empty(&ghes_sci))
unregister_acpi_hed_notifier(&ghes_notifier_sci);
mutex_unlock(&ghes_list_mutex);
synchronize_rcu();
break;
case ACPI_HEST_NOTIFY_NMI:
ghes_nmi_remove(ghes);


@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
struct resource *res = data;
struct resource_win win;
/*
* We might assign this to 'res' later, make sure all pointers are
* cleared before the resource is added to the global list
*/
memset(&win, 0, sizeof(win));
res->flags = 0;
if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM) == 0)
return AE_OK;


@ -1247,6 +1247,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
if (config->funcs->atomic_check)
ret = config->funcs->atomic_check(state->dev, state);
if (ret)
return ret;
if (!state->allow_modeset) {
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@ -1257,7 +1260,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
return ret;
return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);


@ -715,13 +715,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
drm_gem_object_handle_unreference_unlocked(obj);
return 0;


@ -148,8 +148,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
/* Signal polarities */
value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
| ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
| ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
| DSMR_DIPM_DE | DSMR_CSPM;
rcar_du_crtc_write(rcrtc, DSMR, value);
@ -171,7 +171,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
mode->crtc_vsync_start - 1);
rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);
rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start);
rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1);
rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
}


@ -642,13 +642,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
}
ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
of_node_put(encoder);
of_node_put(connector);
if (ret && ret != -EPROBE_DEFER)
dev_warn(rcdu->dev,
"failed to initialize encoder %s (%d), skipping\n",
encoder->full_name, ret);
"failed to initialize encoder %s on output %u (%d), skipping\n",
of_node_full_name(encoder), output, ret);
of_node_put(encoder);
of_node_put(connector);
return ret;
}


@ -56,11 +56,11 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
return ret;
/* PLL clock configuration */
if (freq <= 38000)
if (freq < 39000)
pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
else if (freq <= 60000)
else if (freq < 61000)
pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
else if (freq <= 121000)
else if (freq < 121000)
pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
else
pllcr = LVDPLLCR_PLLDLYCNT_150M;
@ -102,7 +102,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
/* Turn the PLL on, wait for the startup delay, and turn the output
* on.
*/
lvdcr0 |= LVDCR0_PLLEN;
lvdcr0 |= LVDCR0_PLLON;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
usleep_range(100, 150);


@ -18,7 +18,7 @@
#define LVDCR0_DMD (1 << 12)
#define LVDCR0_LVMD_MASK (0xf << 8)
#define LVDCR0_LVMD_SHIFT 8
#define LVDCR0_PLLEN (1 << 4)
#define LVDCR0_PLLON (1 << 4)
#define LVDCR0_BEN (1 << 2)
#define LVDCR0_LVEN (1 << 1)
#define LVDCR0_LVRES (1 << 0)


@ -294,7 +294,7 @@ static void dw_i2c_plat_complete(struct device *dev)
#endif
#ifdef CONFIG_PM
static int dw_i2c_plat_suspend(struct device *dev)
static int dw_i2c_plat_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
@ -318,11 +318,21 @@ static int dw_i2c_plat_resume(struct device *dev)
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int dw_i2c_plat_suspend(struct device *dev)
{
pm_runtime_resume(dev);
return dw_i2c_plat_runtime_suspend(dev);
}
#endif
static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
.prepare = dw_i2c_plat_prepare,
.complete = dw_i2c_plat_complete,
SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
dw_i2c_plat_resume,
NULL)
};
#define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)


@ -194,7 +194,6 @@ struct bmc150_accel_data {
struct device *dev;
int irq;
struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
atomic_t active_intr;
struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
struct mutex mutex;
u8 fifo_mode, watermark;
@ -489,11 +488,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
goto out_fix_power_state;
}
if (state)
atomic_inc(&data->active_intr);
else
atomic_dec(&data->active_intr);
return 0;
out_fix_power_state:
@ -1704,8 +1698,7 @@ static int bmc150_accel_resume(struct device *dev)
struct bmc150_accel_data *data = iio_priv(indio_dev);
mutex_lock(&data->mutex);
if (atomic_read(&data->active_intr))
bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
bmc150_accel_fifo_set_mode(data);
mutex_unlock(&data->mutex);


@ -77,7 +77,7 @@
#define VF610_ADC_ADSTS_MASK 0x300
#define VF610_ADC_ADLPC_EN 0x80
#define VF610_ADC_ADHSC_EN 0x400
#define VF610_ADC_REFSEL_VALT 0x100
#define VF610_ADC_REFSEL_VALT 0x800
#define VF610_ADC_REFSEL_VBG 0x1000
#define VF610_ADC_ADTRG_HARD 0x2000
#define VF610_ADC_AVGS_8 0x4000


@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
s32 poll_value = 0;
if (state) {
if (!atomic_read(&st->user_requested_state))
return 0;
if (sensor_hub_device_open(st->hsdev))
return -EIO;
@ -84,6 +82,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
&report_val);
}
pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
st->pdev->name, state_val, report_val);
sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
st->power_state.index,
sizeof(state_val), &state_val);
@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
ret = pm_runtime_get_sync(&st->pdev->dev);
else {
pm_runtime_mark_last_busy(&st->pdev->dev);
pm_runtime_use_autosuspend(&st->pdev->dev);
ret = pm_runtime_put_autosuspend(&st->pdev->dev);
}
if (ret < 0) {
@ -175,8 +177,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
/* Default to 3 seconds, but can be changed from sysfs */
pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
3000);
pm_runtime_use_autosuspend(&attrb->pdev->dev);
return ret;
error_unreg_trigger:
iio_trigger_unregister(trig);


@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
.gyro_max_scale = 450,
.accel_max_val = IIO_M_S_2_TO_G(12500),
.accel_max_scale = 5,
.accel_max_scale = 10,
},
[ADIS16485] = {
.channels = adis16485_channels,


@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
struct tsl2563_chip *chip = iio_priv(dev_info);
iio_push_event(dev_info,
IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),


@ -1234,7 +1234,12 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
{ "ELAN0602", 0 },
{ "ELAN0605", 0 },
{ "ELAN0608", 0 },
{ "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
{ "ELAN1000", 0 },
{ }
};


@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir
if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
return -1;
if (param[0] != TP_MAGIC_IDENT)
/* add new TP ID. */
if (!(param[0] & TP_MAGIC_IDENT))
return -1;
if (firmware_id)


@ -21,8 +21,9 @@
#define TP_COMMAND 0xE2 /* Commands start with this */
#define TP_READ_ID 0xE1 /* Sent for device identification */
#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */
#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
/* by the firmware ID */
/* Firmware ID includes 0x1, 0x2, 0x3 */
/*


@ -148,9 +148,9 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root)
struct device_node *np;
void __iomem *regs;
np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
if (!np)
np = of_find_compatible_node(root, NULL,
np = of_find_compatible_node(NULL, NULL,
"atmel,at91sam9x5-rtc");
if (!np)
@ -202,7 +202,6 @@ void __init aic_common_irq_fixup(const struct of_device_id *matches)
return;
match = of_match_node(matches, root);
of_node_put(root);
if (match) {
void (*fixup)(struct device_node *) = match->data;


@ -565,6 +565,14 @@ config QPNP_MISC
peripheral. The MISC peripheral holds the USB ID interrupt
and the driver provides an API to check if this interrupt
is available on the current PMIC chip.
config UID_SYS_STATS_DEBUG
bool "Per-TASK statistics"
depends on UID_SYS_STATS
default n
help
Per TASK based io statistics exported to /proc/uid_io
config MEMORY_STATE_TIME
tristate "Memory freq/bandwidth time statistics"
depends on PROFILING


@ -125,6 +125,11 @@
#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
#define MEI_DEV_ID_LBG 0xA1BA /* Lewisburg (SPT) */
#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
/*
* MEI HW Section
*/


@ -86,10 +86,14 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
/* required last entry */
{0, }
};

View file

@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rtmutex.h>
@ -52,6 +53,15 @@ struct io_stats {
#define UID_STATE_DEAD_TASKS 4
#define UID_STATE_SIZE 5
#define MAX_TASK_COMM_LEN 256
struct task_entry {
char comm[MAX_TASK_COMM_LEN];
pid_t pid;
struct io_stats io[UID_STATE_SIZE];
struct hlist_node hash;
};
struct uid_entry {
uid_t uid;
cputime_t utime;
@ -61,8 +71,231 @@ struct uid_entry {
int state;
struct io_stats io[UID_STATE_SIZE];
struct hlist_node hash;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
DECLARE_HASHTABLE(task_entries, UID_HASH_BITS);
#endif
};
static u64 compute_write_bytes(struct task_struct *task)
{
if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
return 0;
return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
}
static void compute_io_bucket_stats(struct io_stats *io_bucket,
struct io_stats *io_curr,
struct io_stats *io_last,
struct io_stats *io_dead)
{
/* tasks could switch to another uid group, but their io_last in the
* previous uid group could still be positive;
* therefore, before each update, do an overflow check first
*/
int64_t delta;
delta = io_curr->read_bytes + io_dead->read_bytes -
io_last->read_bytes;
io_bucket->read_bytes += delta > 0 ? delta : 0;
delta = io_curr->write_bytes + io_dead->write_bytes -
io_last->write_bytes;
io_bucket->write_bytes += delta > 0 ? delta : 0;
delta = io_curr->rchar + io_dead->rchar - io_last->rchar;
io_bucket->rchar += delta > 0 ? delta : 0;
delta = io_curr->wchar + io_dead->wchar - io_last->wchar;
io_bucket->wchar += delta > 0 ? delta : 0;
delta = io_curr->fsync + io_dead->fsync - io_last->fsync;
io_bucket->fsync += delta > 0 ? delta : 0;
io_last->read_bytes = io_curr->read_bytes;
io_last->write_bytes = io_curr->write_bytes;
io_last->rchar = io_curr->rchar;
io_last->wchar = io_curr->wchar;
io_last->fsync = io_curr->fsync;
memset(io_dead, 0, sizeof(struct io_stats));
}
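A standalone sketch of the clamp above: after a task migrates between uid
groups, io_last can exceed curr + dead, and the signed delta test drops the
would-be-negative contribution instead of letting the counter wrap:

#include <stdint.h>
#include <stdio.h>

static uint64_t clamped_delta(uint64_t curr, uint64_t dead, uint64_t last)
{
    int64_t delta = (int64_t)(curr + dead - last);

    return delta > 0 ? (uint64_t)delta : 0;
}

int main(void)
{
    /* stale io_last from a previous uid group contributes nothing */
    printf("%llu\n", (unsigned long long)clamped_delta(100, 0, 150));
    /* normal case: 150 + 20 - 100 = 70 */
    printf("%llu\n", (unsigned long long)clamped_delta(150, 20, 100));
    return 0;
}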
#ifdef CONFIG_UID_SYS_STATS_DEBUG
static void get_full_task_comm(struct task_entry *task_entry,
struct task_struct *task)
{
int i = 0, offset = 0, len = 0;
/* save one byte for terminating null character */
int unused_len = MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1;
char buf[unused_len];
struct mm_struct *mm = task->mm;
/* fill the first TASK_COMM_LEN bytes with thread name */
get_task_comm(task_entry->comm, task);
i = strlen(task_entry->comm);
while (i < TASK_COMM_LEN)
task_entry->comm[i++] = ' ';
/* next the executable file name */
if (mm) {
down_read(&mm->mmap_sem);
if (mm->exe_file) {
char *pathname = d_path(&mm->exe_file->f_path, buf,
unused_len);
if (!IS_ERR(pathname)) {
len = strlcpy(task_entry->comm + i, pathname,
unused_len);
i += len;
task_entry->comm[i++] = ' ';
unused_len--;
}
}
up_read(&mm->mmap_sem);
}
unused_len -= len;
/* fill the rest with the command line arguments, replacing each null
* or newline character between args in argv with whitespace */
len = get_cmdline(task, buf, unused_len);
while (offset < len) {
if (buf[offset] != '\0' && buf[offset] != '\n')
task_entry->comm[i++] = buf[offset];
else
task_entry->comm[i++] = ' ';
offset++;
}
/* get rid of trailing whitespace in case the arg was memset to
* zero before being reset in userspace
*/
while (task_entry->comm[i-1] == ' ')
i--;
task_entry->comm[i] = '\0';
}
static struct task_entry *find_task_entry(struct uid_entry *uid_entry,
struct task_struct *task)
{
struct task_entry *task_entry;
hash_for_each_possible(uid_entry->task_entries, task_entry, hash,
task->pid) {
if (task->pid == task_entry->pid) {
/* if thread name changed, update the entire command */
int len = strnchr(task_entry->comm, ' ', TASK_COMM_LEN)
- task_entry->comm;
if (strncmp(task_entry->comm, task->comm, len))
get_full_task_comm(task_entry, task);
return task_entry;
}
}
return NULL;
}
static struct task_entry *find_or_register_task(struct uid_entry *uid_entry,
struct task_struct *task)
{
struct task_entry *task_entry;
pid_t pid = task->pid;
task_entry = find_task_entry(uid_entry, task);
if (task_entry)
return task_entry;
task_entry = kzalloc(sizeof(struct task_entry), GFP_ATOMIC);
if (!task_entry)
return NULL;
get_full_task_comm(task_entry, task);
task_entry->pid = pid;
hash_add(uid_entry->task_entries, &task_entry->hash, (unsigned int)pid);
return task_entry;
}
static void remove_uid_tasks(struct uid_entry *uid_entry)
{
struct task_entry *task_entry;
unsigned long bkt_task;
struct hlist_node *tmp_task;
hash_for_each_safe(uid_entry->task_entries, bkt_task,
tmp_task, task_entry, hash) {
hash_del(&task_entry->hash);
kfree(task_entry);
}
}
static void set_io_uid_tasks_zero(struct uid_entry *uid_entry)
{
struct task_entry *task_entry;
unsigned long bkt_task;
hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
memset(&task_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
}
}
static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
struct task_struct *task, int slot)
{
struct task_entry *task_entry = find_or_register_task(uid_entry, task);
struct io_stats *task_io_slot = &task_entry->io[slot];
task_io_slot->read_bytes += task->ioac.read_bytes;
task_io_slot->write_bytes += compute_write_bytes(task);
task_io_slot->rchar += task->ioac.rchar;
task_io_slot->wchar += task->ioac.wchar;
task_io_slot->fsync += task->ioac.syscfs;
}
static void compute_io_uid_tasks(struct uid_entry *uid_entry)
{
struct task_entry *task_entry;
unsigned long bkt_task;
hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
compute_io_bucket_stats(&task_entry->io[uid_entry->state],
&task_entry->io[UID_STATE_TOTAL_CURR],
&task_entry->io[UID_STATE_TOTAL_LAST],
&task_entry->io[UID_STATE_DEAD_TASKS]);
}
}
static void show_io_uid_tasks(struct seq_file *m, struct uid_entry *uid_entry)
{
struct task_entry *task_entry;
unsigned long bkt_task;
hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
/* Separated by comma because space exists in task comm */
seq_printf(m, "task,%s,%lu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n",
task_entry->comm,
(unsigned long)task_entry->pid,
task_entry->io[UID_STATE_FOREGROUND].rchar,
task_entry->io[UID_STATE_FOREGROUND].wchar,
task_entry->io[UID_STATE_FOREGROUND].read_bytes,
task_entry->io[UID_STATE_FOREGROUND].write_bytes,
task_entry->io[UID_STATE_BACKGROUND].rchar,
task_entry->io[UID_STATE_BACKGROUND].wchar,
task_entry->io[UID_STATE_BACKGROUND].read_bytes,
task_entry->io[UID_STATE_BACKGROUND].write_bytes,
task_entry->io[UID_STATE_FOREGROUND].fsync,
task_entry->io[UID_STATE_BACKGROUND].fsync);
}
}
#else
static void remove_uid_tasks(struct uid_entry *uid_entry) {};
static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {};
static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
struct task_struct *task, int slot) {};
static void compute_io_uid_tasks(struct uid_entry *uid_entry) {};
static void show_io_uid_tasks(struct seq_file *m,
struct uid_entry *uid_entry) {}
#endif
static struct uid_entry *find_uid_entry(uid_t uid)
{
struct uid_entry *uid_entry;
@ -86,7 +319,9 @@ static struct uid_entry *find_or_register_uid(uid_t uid)
return NULL;
uid_entry->uid = uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
hash_init(uid_entry->task_entries);
#endif
hash_add(hash_table, &uid_entry->hash, uid);
return uid_entry;
@ -192,6 +427,7 @@ static ssize_t uid_remove_write(struct file *file,
hash_for_each_possible_safe(hash_table, uid_entry, tmp,
hash, (uid_t)uid_start) {
if (uid_start == uid_entry->uid) {
remove_uid_tasks(uid_entry);
hash_del(&uid_entry->hash);
kfree(uid_entry);
}
@ -208,13 +444,6 @@ static const struct file_operations uid_remove_fops = {
.write = uid_remove_write,
};
static u64 compute_write_bytes(struct task_struct *task)
{
if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
return 0;
return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
}
static void add_uid_io_stats(struct uid_entry *uid_entry,
struct task_struct *task, int slot)
@ -226,28 +455,8 @@ static void add_uid_io_stats(struct uid_entry *uid_entry,
io_slot->rchar += task->ioac.rchar;
io_slot->wchar += task->ioac.wchar;
io_slot->fsync += task->ioac.syscfs;
}
static void compute_uid_io_bucket_stats(struct io_stats *io_bucket,
struct io_stats *io_curr,
struct io_stats *io_last,
struct io_stats *io_dead)
{
io_bucket->read_bytes += io_curr->read_bytes + io_dead->read_bytes -
io_last->read_bytes;
io_bucket->write_bytes += io_curr->write_bytes + io_dead->write_bytes -
io_last->write_bytes;
io_bucket->rchar += io_curr->rchar + io_dead->rchar - io_last->rchar;
io_bucket->wchar += io_curr->wchar + io_dead->wchar - io_last->wchar;
io_bucket->fsync += io_curr->fsync + io_dead->fsync - io_last->fsync;
io_last->read_bytes = io_curr->read_bytes;
io_last->write_bytes = io_curr->write_bytes;
io_last->rchar = io_curr->rchar;
io_last->wchar = io_curr->wchar;
io_last->fsync = io_curr->fsync;
memset(io_dead, 0, sizeof(struct io_stats));
add_uid_tasks_io_stats(uid_entry, task, slot);
}
static void update_io_stats_all_locked(void)
@ -258,9 +467,11 @@ static void update_io_stats_all_locked(void)
unsigned long bkt;
uid_t uid;
hash_for_each(hash_table, bkt, uid_entry, hash)
hash_for_each(hash_table, bkt, uid_entry, hash) {
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
set_io_uid_tasks_zero(uid_entry);
}
rcu_read_lock();
do_each_thread(temp, task) {
@ -274,10 +485,11 @@ static void update_io_stats_all_locked(void)
rcu_read_unlock();
hash_for_each(hash_table, bkt, uid_entry, hash) {
compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
&uid_entry->io[UID_STATE_TOTAL_CURR],
&uid_entry->io[UID_STATE_TOTAL_LAST],
&uid_entry->io[UID_STATE_DEAD_TASKS]);
compute_io_uid_tasks(uid_entry);
}
}
@ -288,6 +500,7 @@ static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
set_io_uid_tasks_zero(uid_entry);
rcu_read_lock();
do_each_thread(temp, task) {
@ -297,12 +510,14 @@ static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
} while_each_thread(temp, task);
rcu_read_unlock();
compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
&uid_entry->io[UID_STATE_TOTAL_CURR],
&uid_entry->io[UID_STATE_TOTAL_LAST],
&uid_entry->io[UID_STATE_DEAD_TASKS]);
compute_io_uid_tasks(uid_entry);
}
static int uid_io_show(struct seq_file *m, void *v)
{
struct uid_entry *uid_entry;
@ -314,21 +529,22 @@ static int uid_io_show(struct seq_file *m, void *v)
hash_for_each(hash_table, bkt, uid_entry, hash) {
seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
uid_entry->uid,
uid_entry->io[UID_STATE_FOREGROUND].rchar,
uid_entry->io[UID_STATE_FOREGROUND].wchar,
uid_entry->io[UID_STATE_FOREGROUND].read_bytes,
uid_entry->io[UID_STATE_FOREGROUND].write_bytes,
uid_entry->io[UID_STATE_BACKGROUND].rchar,
uid_entry->io[UID_STATE_BACKGROUND].wchar,
uid_entry->io[UID_STATE_BACKGROUND].read_bytes,
uid_entry->io[UID_STATE_BACKGROUND].write_bytes,
uid_entry->io[UID_STATE_FOREGROUND].fsync,
uid_entry->io[UID_STATE_BACKGROUND].fsync);
uid_entry->uid,
uid_entry->io[UID_STATE_FOREGROUND].rchar,
uid_entry->io[UID_STATE_FOREGROUND].wchar,
uid_entry->io[UID_STATE_FOREGROUND].read_bytes,
uid_entry->io[UID_STATE_FOREGROUND].write_bytes,
uid_entry->io[UID_STATE_BACKGROUND].rchar,
uid_entry->io[UID_STATE_BACKGROUND].wchar,
uid_entry->io[UID_STATE_BACKGROUND].read_bytes,
uid_entry->io[UID_STATE_BACKGROUND].write_bytes,
uid_entry->io[UID_STATE_FOREGROUND].fsync,
uid_entry->io[UID_STATE_BACKGROUND].fsync);
show_io_uid_tasks(m, uid_entry);
}
rt_mutex_unlock(&uid_lock);
return 0;
}


@ -707,6 +707,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */


@ -177,6 +177,16 @@ static int fdp_nci_i2c_read(struct fdp_i2c_phy *phy, struct sk_buff **skb)
/* Packet that contains a length */
if (tmp[0] == 0 && tmp[1] == 0) {
phy->next_read_size = (tmp[2] << 8) + tmp[3] + 3;
/*
* Ensure next_read_size does not exceed sizeof(tmp)
* for reading that many bytes during next iteration
*/
if (phy->next_read_size > FDP_NCI_I2C_MAX_PAYLOAD) {
dev_dbg(&client->dev, "%s: corrupted packet\n",
__func__);
phy->next_read_size = 5;
goto flush;
}
} else {
phy->next_read_size = FDP_NCI_I2C_MIN_PAYLOAD;
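The added check as a standalone sketch; 261 for the payload bound is an
assumption here (use the driver's actual FDP_NCI_I2C_MAX_PAYLOAD):

#define FDP_NCI_I2C_MAX_PAYLOAD 261  /* assumed receive buffer bound */

/* derive the next read size from the NCI length header, refusing any
 * value that would overrun the buffer on the next i2c read */
static int fdp_next_read_size(const unsigned char *tmp)
{
    int size = (tmp[2] << 8) + tmp[3] + 3;

    if (size > FDP_NCI_I2C_MAX_PAYLOAD)
        return -1;  /* corrupted packet: caller resets to the default */
    return size;
}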


@ -217,7 +217,8 @@ static int st21nfca_tm_recv_atr_req(struct nfc_hci_dev *hdev,
atr_req = (struct st21nfca_atr_req *)skb->data;
if (atr_req->length < sizeof(struct st21nfca_atr_req)) {
if (atr_req->length < sizeof(struct st21nfca_atr_req) ||
atr_req->length > skb->len) {
r = -EPROTO;
goto exit;
}


@ -321,23 +321,33 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
* AID 81 5 to 16
* PARAMETERS 82 0 to 255
*/
if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
if (skb->len < NFC_MIN_AID_LENGTH + 2 ||
skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
/*
* Buffer should have enough space for at least
* two tag fields + two length fields + aid_len (skb->data[1])
*/
if (skb->len < skb->data[1] + 4)
return -EPROTO;
transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
skb->len - 2, GFP_KERNEL);
transaction->aid_len = skb->data[1];
memcpy(transaction->aid, &skb->data[2],
transaction->aid_len);
/* Check next byte is PARAMETERS tag (82) */
if (skb->data[transaction->aid_len + 2] !=
NFC_EVT_TRANSACTION_PARAMS_TAG)
return -EPROTO;
transaction->params_len = skb->data[transaction->aid_len + 3];
/* Check next byte is PARAMETERS tag (82) and the length field */
if (skb->data[transaction->aid_len + 2] !=
NFC_EVT_TRANSACTION_PARAMS_TAG ||
skb->len < transaction->aid_len + transaction->params_len + 4) {
devm_kfree(dev, transaction);
return -EPROTO;
}
memcpy(transaction->params, skb->data +
transaction->aid_len + 4, transaction->params_len);
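Taken together, the added checks enforce this TLV layout before anything is
copied out of the skb. A standalone sketch of the same validation (tag
values 0x81/0x82 per the comment above; NFC_MIN_AID_LENGTH = 5 is assumed):

#include <stddef.h>

#define NFC_MIN_AID_LENGTH 5  /* assumed minimum AID length */

/* layout: [0]=AID tag 0x81, [1]=aid_len, AID bytes,
 * [aid_len+2]=PARAMS tag 0x82, [aid_len+3]=params_len, params bytes */
static int evt_transaction_valid(const unsigned char *data, size_t len)
{
    size_t aid_len, params_len;

    if (len < NFC_MIN_AID_LENGTH + 2 || data[0] != 0x81)
        return 0;
    aid_len = data[1];
    if (len < aid_len + 4)  /* room for both tag/length pairs */
        return 0;
    if (data[aid_len + 2] != 0x82)
        return 0;
    params_len = data[aid_len + 3];
    if (len < aid_len + params_len + 4)
        return 0;
    return 1;
}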


@ -599,7 +599,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
if (!mw->virt_addr)
return -ENOMEM;
if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
if (mw_num < qp_count % mw_count)
num_qps_mw = qp_count / mw_count + 1;
else
num_qps_mw = qp_count / mw_count;
@ -947,7 +947,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
qp->event_handler = NULL;
ntb_qp_link_down_reset(qp);
if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
if (mw_num < qp_count % mw_count)
num_qps_mw = qp_count / mw_count + 1;
else
num_qps_mw = qp_count / mw_count;
@ -1065,8 +1065,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
qp_count = ilog2(qp_bitmap);
if (max_num_clients && max_num_clients < qp_count)
qp_count = max_num_clients;
else if (mw_count < qp_count)
qp_count = mw_count;
else if (nt->mw_count < qp_count)
qp_count = nt->mw_count;
qp_bitmap &= BIT_ULL(qp_count) - 1;
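The corrected predicate is the usual way to spread qp_count queues over
mw_count memory windows: the first qp_count % mw_count windows carry one
extra queue. A quick standalone check:

#include <stdio.h>

int main(void)
{
    unsigned int qp_count = 10, mw_count = 4, mw_num;

    for (mw_num = 0; mw_num < mw_count; mw_num++) {
        unsigned int num_qps_mw = qp_count / mw_count;

        if (mw_num < qp_count % mw_count)
            num_qps_mw++;
        printf("mw %u -> %u qps\n", mw_num, num_qps_mw); /* 3 3 2 2 */
    }
    return 0;
}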


@ -954,7 +954,7 @@ static int __init dino_probe(struct parisc_device *dev)
dino_dev->hba.dev = dev;
dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */
dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
spin_lock_init(&dino_dev->dinosaur_pen);
dino_dev->hba.iommu = ccio_get_iommu(dev);


@ -194,8 +194,6 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
spin_unlock_irqrestore(&bank->slock, flags);
exynos_irq_unmask(irqd);
return 0;
}
@ -216,8 +214,6 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
exynos_irq_mask(irqd);
spin_lock_irqsave(&bank->slock, flags);
con = readl(d->virt_base + reg_con);


@ -811,6 +811,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x2, "lcd1"), /* D16 */
SUNXI_FUNCTION(0x3, "pata"), /* ATAD12 */
SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */
SUNXI_FUNCTION(0x5, "sim"), /* DET */
SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */
SUNXI_FUNCTION(0x7, "csi1")), /* D16 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),


@ -468,7 +468,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad2s1210_state *st = iio_priv(indio_dev);
bool negative;
u16 negative;
int ret = 0;
u16 pos;
s16 vel;


@ -49,6 +49,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */
};


@ -418,6 +418,7 @@ int iscsit_reset_np_thread(
return 0;
}
np->np_thread_state = ISCSI_NP_THREAD_RESET;
atomic_inc(&np->np_reset_count);
if (np->np_thread) {
spin_unlock_bh(&np->np_thread_lock);
@ -1996,6 +1997,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
cmd->data_direction = DMA_NONE;
kfree(cmd->text_in_ptr);
cmd->text_in_ptr = NULL;
return 0;


@ -1219,9 +1219,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
flush_signals(current);
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
return 1;
} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
spin_unlock_bh(&np->np_thread_lock);
goto exit;
@ -1254,7 +1256,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
goto exit;
} else if (rc < 0) {
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
iscsit_put_transport(conn->conn_transport);


@ -1851,7 +1851,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev,
/* No more submits can occur */
spin_lock_irq(&hcd_urb_list_lock);
rescan:
list_for_each_entry (urb, &ep->urb_list, urb_list) {
list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
int is_in;
if (urb->unlinked)
@ -2505,6 +2505,8 @@ void usb_hc_died (struct usb_hcd *hcd)
}
if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
hcd = hcd->shared_hcd;
clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
set_bit(HCD_FLAG_DEAD, &hcd->flags);
if (hcd->rh_registered) {
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);


@ -4674,7 +4674,8 @@ hub_power_remaining(struct usb_hub *hub)
static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
u16 portchange)
{
int status, i;
int status = -ENODEV;
int i;
unsigned unit_load;
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
@ -4878,9 +4879,10 @@ loop:
done:
hub_port_disable(hub, port1, 1);
if (hcd->driver->relinquish_port && !hub->hdev->parent)
hcd->driver->relinquish_port(hcd, port1);
if (hcd->driver->relinquish_port && !hub->hdev->parent) {
if (status != -ENOTCONN && status != -ENODEV)
hcd->driver->relinquish_port(hcd, port1);
}
}
/* Handle physical or logical connection change events.


@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
/* Avision AV600U */
{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
{ USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Optical Mouse M90/M100 */
{ USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },


@ -127,6 +127,22 @@ out:
*/
#define USB_ACPI_LOCATION_VALID (1 << 31)
static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
int raw)
{
struct acpi_device *adev;
if (!parent)
return NULL;
list_for_each_entry(adev, &parent->children, node) {
if (acpi_device_adr(adev) == raw)
return adev;
}
return acpi_find_child_device(parent, raw, false);
}
static struct acpi_device *usb_acpi_find_companion(struct device *dev)
{
struct usb_device *udev;
@ -174,8 +190,10 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
int raw;
raw = usb_hcd_find_raw_port_number(hcd, port1);
adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev),
raw, false);
adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev),
raw);
if (!adev)
return NULL;
} else {
@ -186,7 +204,9 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
return NULL;
acpi_bus_get_device(parent_handle, &adev);
adev = acpi_find_child_device(adev, port1, false);
adev = usb_acpi_find_port(adev, port1);
if (!adev)
return NULL;
}


@ -1540,6 +1540,18 @@ static void android_disconnect(struct usb_gadget *gadget)
gi = container_of(cdev, struct gadget_info, cdev);
/* FIXME: There's a race between usb_gadget_udc_stop() which is likely
* to set the gadget driver to NULL in the udc driver and this driver's
* gadget disconnect fn which likely checks for the gadget driver to
* be a null ptr. It happens that unbind (doing set_gadget_data(NULL))
* is called before the gadget driver is set to NULL and the udc driver
* calls disconnect fn which results in cdev being a null ptr.
*/
if (cdev == NULL) {
WARN(1, "%s: gadget driver already disconnected\n", __func__);
return;
}
/* accessory HID support can be active while the
accessory function is not actually enabled,
so we need to inform it when we are disconnected.


@ -865,6 +865,14 @@ static struct hid_driver acc_hid_driver = {
.probe = acc_hid_probe,
};
static void acc_complete_setup_noop(struct usb_ep *ep, struct usb_request *req)
{
/*
* Default no-op function when nothing needs to be done for the
* setup request
*/
}
int acc_ctrlrequest(struct usb_composite_dev *cdev,
const struct usb_ctrlrequest *ctrl)
{
@ -892,6 +900,7 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev,
schedule_delayed_work(
&dev->start_work, msecs_to_jiffies(10));
value = 0;
cdev->req->complete = acc_complete_setup_noop;
} else if (b_request == ACCESSORY_SEND_STRING) {
dev->string_index = w_index;
cdev->gadget->ep0->driver_data = dev;
@ -900,10 +909,13 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev,
} else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
w_index == 0 && w_length == 0) {
dev->audio_mode = w_value;
cdev->req->complete = acc_complete_setup_noop;
value = 0;
} else if (b_request == ACCESSORY_REGISTER_HID) {
cdev->req->complete = acc_complete_setup_noop;
value = acc_register_hid(dev, w_value, w_index);
} else if (b_request == ACCESSORY_UNREGISTER_HID) {
cdev->req->complete = acc_complete_setup_noop;
value = acc_unregister_hid(dev, w_value);
} else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
spin_lock_irqsave(&dev->lock, flags);
@ -938,7 +950,7 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev,
if (b_request == ACCESSORY_GET_PROTOCOL) {
*((u16 *)cdev->req->buf) = PROTOCOL_VERSION;
value = sizeof(u16);
cdev->req->complete = acc_complete_setup_noop;
/* clear any string left over from a previous session */
memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
memset(dev->model, 0, sizeof(dev->model));


@ -89,6 +89,7 @@ enum amd_chipset_gen {
AMD_CHIPSET_HUDSON2,
AMD_CHIPSET_BOLTON,
AMD_CHIPSET_YANGTZE,
AMD_CHIPSET_TAISHAN,
AMD_CHIPSET_UNKNOWN,
};
@ -132,6 +133,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
pinfo->sb_type.gen = AMD_CHIPSET_SB700;
else if (rev >= 0x40 && rev <= 0x4f)
pinfo->sb_type.gen = AMD_CHIPSET_SB800;
}
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
0x145c, NULL);
if (pinfo->smbus_dev) {
pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
} else {
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
@ -251,11 +257,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
{
/* Make sure amd chipset type has already been initialized */
usb_amd_find_chipset_info();
if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
return 0;
dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
return 1;
if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);


@ -138,6 +138,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
"Could not flush host TX%d fifo: csr: %04x\n",
ep->epnum, csr))
return;
mdelay(1);
}
}


@ -135,6 +135,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */


@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */


@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },


@ -27,6 +27,7 @@
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
#define ATEN_PRODUCT_ID 0x2008
#define ATEN_PRODUCT_UC485 0x2021
#define ATEN_PRODUCT_ID2 0x2118
#define IODATA_VENDOR_ID 0x04bb


@ -123,9 +123,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
"",
"INIC-3069",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE),
/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,


@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
((bfn1 == bfn2) || ((bfn1+1) == bfn2));
return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
#else
/*
* XXX: Add support for merging bio_vec when using different page
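The new return statement above tightens the merge test: vec2 must start in
exactly the backend frame in which vec1 ends, not merely in an adjacent
frame number. Worked standalone (4KB pages assumed):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
    /* vec1 occupies the second half of backend frame 100 */
    uint64_t bfn1 = 100, bv_offset = 0x800, bv_len = 0x800;
    uint64_t bfn2 = 101;  /* vec2 starts in frame 101 */

    int ok = bfn1 + PFN_DOWN(bv_offset + bv_len) == bfn2;
    printf("mergeable = %d\n", ok);  /* 1: contiguous in backend space */
    return 0;
}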


@ -183,15 +183,20 @@ cifs_bp_rename_retry:
}
/*
* Don't allow path components longer than the server max.
* Don't allow the separator character in a path component.
* The VFS will not allow "/", but "\" is allowed by posix.
*/
static int
check_name(struct dentry *direntry)
check_name(struct dentry *direntry, struct cifs_tcon *tcon)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
int i;
if (unlikely(direntry->d_name.len >
tcon->fsAttrInfo.MaxPathNameComponentLength))
return -ENAMETOOLONG;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
for (i = 0; i < direntry->d_name.len; i++) {
if (direntry->d_name.name[i] == '\\') {
@ -489,10 +494,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
return finish_no_open(file, res);
}
rc = check_name(direntry);
if (rc)
return rc;
xid = get_xid();
cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
@ -505,6 +506,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
}
tcon = tlink_tcon(tlink);
rc = check_name(direntry, tcon);
if (rc)
goto out_free_xid;
server = tcon->ses->server;
if (server->ops->new_lease_key)
@ -765,7 +771,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
}
pTcon = tlink_tcon(tlink);
rc = check_name(direntry);
rc = check_name(direntry, pTcon);
if (rc)
goto lookup_out;


@ -2768,8 +2768,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
kst->f_bfree = kst->f_bavail =
le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
return;
}


@ -55,7 +55,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
struct fuse_file *ff;
ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
if (unlikely(!ff))
return NULL;


@ -121,6 +121,7 @@ config PNFS_FILE_LAYOUT
config PNFS_BLOCK
tristate
depends on NFS_V4_1 && BLK_DEV_DM
depends on 64BIT || LBDAF
default NFS_V4
config PNFS_OBJLAYOUT


@ -30,6 +30,7 @@ void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
nfs4_pnfs_ds_put(mirror_ds->ds);
kfree(mirror_ds->ds_versions);
kfree_rcu(mirror_ds, id_node.rcu);
}


@ -129,7 +129,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp)
argp->p = page_address(argp->pagelist[0]);
argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) {
argp->end = argp->p + (argp->pagelen>>2);
argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
argp->pagelen = 0;
} else {
argp->end = argp->p + (PAGE_SIZE>>2);
@ -1246,9 +1246,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
argp->pagelen -= pages * PAGE_SIZE;
len -= pages * PAGE_SIZE;
argp->p = (__be32 *)page_address(argp->pagelist[0]);
argp->pagelist++;
argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
next_decode_page(argp);
}
argp->p += XDR_QUADLEN(len);


@ -16,6 +16,7 @@
#ifdef CONFIG_CPUSETS
extern struct static_key cpusets_pre_enable_key;
extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
@ -30,12 +31,14 @@ static inline int nr_cpusets(void)
static inline void cpuset_inc(void)
{
static_key_slow_inc(&cpusets_pre_enable_key);
static_key_slow_inc(&cpusets_enabled_key);
}
static inline void cpuset_dec(void)
{
static_key_slow_dec(&cpusets_enabled_key);
static_key_slow_dec(&cpusets_pre_enable_key);
}
extern int cpuset_init(void);
@ -104,7 +107,7 @@ extern void cpuset_print_current_mems_allowed(void);
*/
static inline unsigned int read_mems_allowed_begin(void)
{
if (!cpusets_enabled())
if (!static_key_false(&cpusets_pre_enable_key))
return 0;
return read_seqcount_begin(&current->mems_allowed_seq);
@ -118,7 +121,7 @@ static inline unsigned int read_mems_allowed_begin(void)
*/
static inline bool read_mems_allowed_retry(unsigned int seq)
{
if (!cpusets_enabled())
if (!static_key_false(&cpusets_enabled_key))
return false;
return read_seqcount_retry(&current->mems_allowed_seq, seq);
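The asymmetry above is deliberate: read_mems_allowed_begin() keys off
cpusets_pre_enable_key, which cpuset_inc() flips first, while
read_mems_allowed_retry() keys off cpusets_enabled_key, flipped last. So
whenever a retry can fire, the matching begin() is guaranteed to have
sampled a real seqcount value. A sketch of the ordering, with plain
booleans standing in for the static keys:

#include <stdbool.h>

static bool pre_enable_key; /* gates read_mems_allowed_begin() */
static bool enabled_key;    /* gates read_mems_allowed_retry() */

static void cpuset_inc_sketch(void)
{
    pre_enable_key = true;  /* begin() goes live first... */
    enabled_key = true;     /* ...retry() only after that */
}

static void cpuset_dec_sketch(void)
{
    enabled_key = false;    /* tear down in the reverse order */
    pre_enable_key = false;
}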


@ -2201,22 +2201,6 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
static inline int pid_alive(const struct task_struct *p);
static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
pid_t pid = 0;
rcu_read_lock();
if (pid_alive(tsk))
pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
rcu_read_unlock();
return pid;
}
static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
return task_ppid_nr_ns(tsk, &init_pid_ns);
}
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
struct pid_namespace *ns)
@ -2251,6 +2235,23 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
}
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
pid_t pid = 0;
rcu_read_lock();
if (pid_alive(tsk))
pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
rcu_read_unlock();
return pid;
}
static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
return task_ppid_nr_ns(tsk, &init_pid_ns);
}
/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{


@ -317,7 +317,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
!forwarding)
return dst_mtu(dst);
return min(dst->dev->mtu, IP_MAX_MTU);
return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}
static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
@ -330,7 +330,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
}
return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}
u32 ip_idents_reserve(u32 hash, int segs);


@ -717,8 +717,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
old = *pold;
*pold = new;
if (old != NULL) {
qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
unsigned int qlen = old->q.qlen;
unsigned int backlog = old->qstats.backlog;
qdisc_reset(old);
qdisc_tree_reduce_backlog(old, qlen, backlog);
}
sch_tree_unlock(sch);


@ -784,6 +784,7 @@ struct iscsi_np {
int np_sock_type;
enum np_thread_state_table np_thread_state;
bool enabled;
atomic_t np_reset_count;
enum iscsi_timer_flags_table np_login_timer_flags;
u32 np_exports;
enum np_flags_table np_flags;


@ -457,13 +457,15 @@ void audit_remove_watch_rule(struct audit_krule *krule)
list_del(&krule->rlist);
if (list_empty(&watch->rules)) {
/*
* audit_remove_watch() drops our reference to 'parent' which
* can get freed. Grab our own reference to be safe.
*/
audit_get_parent(parent);
audit_remove_watch(watch);
if (list_empty(&parent->watches)) {
audit_get_parent(parent);
if (list_empty(&parent->watches))
fsnotify_destroy_mark(&parent->mark, audit_watch_group);
audit_put_parent(parent);
}
audit_put_parent(parent);
}
}


@ -60,6 +60,7 @@
#include <linux/cgroup.h>
#include <linux/wait.h>
struct static_key cpusets_pre_enable_key __read_mostly = STATIC_KEY_INIT_FALSE;
struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE;
/* See "Frequency meter" comments, below. */


@ -8698,28 +8698,27 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_context;
/*
* Do not allow to attach to a group in a different
* task or CPU context:
* Make sure we're both events for the same CPU;
* grouping events for different CPUs is broken; since
* you can never concurrently schedule them anyhow.
*/
if (move_group) {
/*
* Make sure we're both on the same task, or both
* per-cpu events.
*/
if (group_leader->ctx->task != ctx->task)
goto err_context;
if (group_leader->cpu != event->cpu)
goto err_context;
/*
* Make sure we're both events for the same CPU;
* grouping events for different CPUs is broken; since
* you can never concurrently schedule them anyhow.
*/
if (group_leader->cpu != event->cpu)
goto err_context;
} else {
if (group_leader->ctx != ctx)
goto err_context;
}
/*
* Make sure we're both on the same task, or both
* per-CPU events.
*/
if (group_leader->ctx->task != ctx->task)
goto err_context;
/*
* Do not allow to attach to a group in a different task
* or CPU context. If we're moving SW events, we'll fix
* this up later, so allow that.
*/
if (!move_group && group_leader->ctx != ctx)
goto err_context;
/*
* Only a group leader can be exclusive or pinned


@ -1979,6 +1979,10 @@ static int create_filter(struct trace_event_call *call,
if (err && set_str)
append_filter_err(ps, filter);
}
if (err && !set_str) {
free_event_filter(filter);
filter = NULL;
}
create_filter_finish(ps);
*filterp = filter;


@ -895,11 +895,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
*policy |= (pol->flags & MPOL_MODE_FLAGS);
}
if (vma) {
up_read(&current->mm->mmap_sem);
vma = NULL;
}
err = 0;
if (nmask) {
if (mpol_store_user_nodemask(pol)) {


@ -40,6 +40,7 @@
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/ptrace.h>
#include <asm/tlbflush.h>
@ -1649,7 +1650,6 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
const int __user *, nodes,
int __user *, status, int, flags)
{
const struct cred *cred = current_cred(), *tcred;
struct task_struct *task;
struct mm_struct *mm;
int err;
@ -1673,14 +1673,9 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
/*
* Check if this process has the right to modify the specified
* process. The right exists if the process has administrative
* capabilities, superuser privileges or the same
* userid as the target process.
* process. Use the regular "ptrace_may_access()" checks.
*/
tcred = __task_cred(task);
if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
!uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
!capable(CAP_SYS_NICE)) {
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
rcu_read_unlock();
err = -EPERM;
goto out;
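
The old open-coded uid matrix also matched on saved uids, which is looser than what ptrace permits; delegating to ptrace_may_access() both simplifies the code and tightens the check. A userspace illustration of that tightening (uids hardcoded; the real ptrace_may_access() also consults capabilities and LSMs):

    #include <stdbool.h>
    #include <stdio.h>

    struct cred { unsigned uid, euid, suid; };

    /* Old style: explicit uid matrix plus a capability escape hatch. */
    static bool may_modify_old(const struct cred *me,
                               const struct cred *target, bool cap_sys_nice)
    {
        return me->euid == target->suid || me->euid == target->uid ||
               me->uid  == target->suid || me->uid  == target->uid ||
               cap_sys_nice;
    }

    /* New style: one well-audited predicate. Here a stub that models
     * "same real uid or privileged". */
    static bool ptrace_may_access_stub(const struct cred *me,
                                       const struct cred *target,
                                       bool privileged)
    {
        return privileged || me->uid == target->uid;
    }

    int main(void)
    {
        struct cred me    = { .uid = 1000, .euid = 1000, .suid = 1000 };
        /* Target shares only a saved uid with us. */
        struct cred other = { .uid = 1001, .euid = 1001, .suid = 1000 };

        /* Old check passes on the shared suid; the new one refuses. */
        printf("old: %d  new: %d\n",
               may_modify_old(&me, &other, false),
               ptrace_may_access_stub(&me, &other, false));
        return 0;
    }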

View file

@ -6910,7 +6910,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
/* Make sure the range is really isolated. */
if (test_pages_isolated(outer_start, end, false)) {
pr_info("%s: [%lx, %lx) PFNs busy\n",
pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
__func__, outer_start, end);
ret = -EBUSY;
goto done;

View file

@ -484,16 +484,16 @@ static int bnep_session(void *arg)
struct net_device *dev = s->dev;
struct sock *sk = s->sock->sk;
struct sk_buff *skb;
wait_queue_t wait;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
BT_DBG("");
set_user_nice(current, -15);
init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sk), &wait);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
/* Ensure session->terminate is updated */
smp_mb__before_atomic();
if (atomic_read(&s->terminate))
break;
@ -515,9 +515,8 @@ static int bnep_session(void *arg)
break;
netif_wake_queue(dev);
schedule();
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
/* Cleanup session */
@ -663,7 +662,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
s = __bnep_get_session(req->dst);
if (s) {
atomic_inc(&s->terminate);
wake_up_process(s->task);
wake_up_interruptible(sk_sleep(s->sock->sk));
} else
err = -ENOENT;
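
This and the cmtp/hidp hunks below make the same conversion: the open-coded set_current_state()/schedule() loop becomes wait_woken(), so a wakeup landing between the test of session->terminate and the sleep can no longer be lost. The kernel primitives have no userspace equivalent, so this sketch models the same lost-wakeup-free shape with a pthread condition variable (build with -pthread):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the wait queue and terminate flag. The condvar gives
     * the guarantee wait_woken() restores here: a wakeup issued after the
     * predicate is set cannot fall between test and sleep. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
    static bool terminate;

    static void *session_thread(void *arg)
    {
        pthread_mutex_lock(&lock);
        while (!terminate)                 /* predicate re-checked on wake */
            pthread_cond_wait(&wq, &lock);
        pthread_mutex_unlock(&lock);
        printf("session: terminate seen, exiting\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, session_thread, NULL);

        pthread_mutex_lock(&lock);
        terminate = true;                  /* set the flag ... */
        pthread_cond_signal(&wq);          /* ... then wake, atomically
                                            * with respect to the wait */
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
    }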

View file

@ -281,16 +281,16 @@ static int cmtp_session(void *arg)
struct cmtp_session *session = arg;
struct sock *sk = session->sock->sk;
struct sk_buff *skb;
wait_queue_t wait;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
BT_DBG("session %pK", session);
set_user_nice(current, -15);
init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sk), &wait);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
/* Ensure session->terminate is updated */
smp_mb__before_atomic();
if (atomic_read(&session->terminate))
break;
@ -307,9 +307,8 @@ static int cmtp_session(void *arg)
cmtp_process_transmit(session);
schedule();
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
down_write(&cmtp_session_sem);
@ -394,7 +393,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
err = cmtp_attach_device(session);
if (err < 0) {
atomic_inc(&session->terminate);
wake_up_process(session->task);
wake_up_interruptible(sk_sleep(session->sock->sk));
up_write(&cmtp_session_sem);
return err;
}
@ -432,7 +431,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
/* Stop session thread */
atomic_inc(&session->terminate);
wake_up_process(session->task);
/* Ensure session->terminate is updated */
smp_mb__after_atomic();
wake_up_interruptible(sk_sleep(session->sock->sk));
} else
err = -ENOENT;

View file

@ -36,6 +36,7 @@
#define VERSION "1.2"
static DECLARE_RWSEM(hidp_session_sem);
static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq);
static LIST_HEAD(hidp_session_list);
static unsigned char hidp_keycode[256] = {
@ -1069,12 +1070,12 @@ static int hidp_session_start_sync(struct hidp_session *session)
* Wake up session thread and notify it to stop. This is asynchronous and
* returns immediately. Call this whenever a runtime error occurs and you want
* the session to stop.
* Note: wake_up_process() performs any necessary memory-barriers for us.
* Note: wake_up_interruptible() performs any necessary memory-barriers for us.
*/
static void hidp_session_terminate(struct hidp_session *session)
{
atomic_inc(&session->terminate);
wake_up_process(session->task);
wake_up_interruptible(&hidp_session_wq);
}
/*
@ -1181,7 +1182,9 @@ static void hidp_session_run(struct hidp_session *session)
struct sock *ctrl_sk = session->ctrl_sock->sk;
struct sock *intr_sk = session->intr_sock->sk;
struct sk_buff *skb;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(&hidp_session_wq, &wait);
for (;;) {
/*
* This thread can be woken up two ways:
@ -1189,12 +1192,10 @@ static void hidp_session_run(struct hidp_session *session)
* session->terminate flag and wakes this thread up.
* - Via modifying the socket state of ctrl/intr_sock. This
* thread is woken up by ->sk_state_changed().
*
* Note: set_current_state() performs any necessary
* memory-barriers for us.
*/
set_current_state(TASK_INTERRUPTIBLE);
/* Ensure session->terminate is updated */
smp_mb__before_atomic();
if (atomic_read(&session->terminate))
break;
@ -1228,11 +1229,22 @@ static void hidp_session_run(struct hidp_session *session)
hidp_process_transmit(session, &session->ctrl_transmit,
session->ctrl_sock);
schedule();
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
remove_wait_queue(&hidp_session_wq, &wait);
atomic_inc(&session->terminate);
set_current_state(TASK_RUNNING);
/* Ensure session->terminate is updated */
smp_mb__after_atomic();
}
static int hidp_session_wake_function(wait_queue_t *wait,
unsigned int mode,
int sync, void *key)
{
wake_up_interruptible(&hidp_session_wq);
return false;
}
/*
@ -1245,7 +1257,8 @@ static void hidp_session_run(struct hidp_session *session)
static int hidp_session_thread(void *arg)
{
struct hidp_session *session = arg;
wait_queue_t ctrl_wait, intr_wait;
DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function);
DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function);
BT_DBG("session %pK", session);
@ -1255,8 +1268,6 @@ static int hidp_session_thread(void *arg)
set_user_nice(current, -15);
hidp_set_timer(session);
init_waitqueue_entry(&ctrl_wait, current);
init_waitqueue_entry(&intr_wait, current);
add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
/* This memory barrier is paired with wq_has_sleeper(). See

View file

@ -24,6 +24,7 @@
#include <net/checksum.h>
#include <net/inet_sock.h>
#include <net/inet_common.h>
#include <net/sock.h>
#include <net/xfrm.h>
@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type)
EXPORT_SYMBOL_GPL(dccp_packet_name);
static void dccp_sk_destruct(struct sock *sk)
{
struct dccp_sock *dp = dccp_sk(sk);
ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
dp->dccps_hc_tx_ccid = NULL;
inet_sock_destruct(sk);
}
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
struct dccp_sock *dp = dccp_sk(sk);
@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
icsk->icsk_syn_retries = sysctl_dccp_request_retries;
sk->sk_state = DCCP_CLOSED;
sk->sk_write_space = dccp_write_space;
sk->sk_destruct = dccp_sk_destruct;
icsk->icsk_sync_mss = dccp_sync_mss;
dp->dccps_mss_cache = 536;
dp->dccps_rate_last = jiffies;
@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk)
{
struct dccp_sock *dp = dccp_sk(sk);
/*
* DCCP doesn't use sk_write_queue, just sk_send_head
* for retransmissions
*/
__skb_queue_purge(&sk->sk_write_queue);
if (sk->sk_send_head != NULL) {
kfree_skb(sk->sk_send_head);
sk->sk_send_head = NULL;
@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk)
dp->dccps_hc_rx_ackvec = NULL;
}
ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
dp->dccps_hc_rx_ccid = NULL;
/* clean up feature negotiation state */
dccp_feat_list_purge(&dp->dccps_featneg);
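
Freeing the TX CCID from a dedicated sk_destruct ties its teardown to the socket's final release instead of to dccp_destroy_sock(), so late retransmission work cannot find a half-destroyed CCID. A simplified sketch of the register-a-destructor pattern (types are stand-ins, not the DCCP API):

    #include <stdio.h>
    #include <stdlib.h>

    struct sock;
    typedef void (*destruct_fn)(struct sock *);

    struct sock {
        destruct_fn sk_destruct;
        void *tx_state;         /* stands in for dccps_hc_tx_ccid */
    };

    static void dccp_destructor(struct sock *sk)
    {
        /* Runs exactly once, at final release: the safe point to free
         * TX state that retransmissions might still reference. */
        free(sk->tx_state);
        sk->tx_state = NULL;
        printf("tx state torn down at final release\n");
    }

    static void sock_final_put(struct sock *sk)
    {
        if (sk->sk_destruct)
            sk->sk_destruct(sk);
    }

    int main(void)
    {
        struct sock sk = { .sk_destruct = dccp_destructor,
                           .tx_state = malloc(16) };
        /* ... socket lives, transmits, maybe retransmits ... */
        sock_final_put(&sk);    /* teardown deferred to the very end */
        return 0;
    }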

View file

@ -1044,15 +1044,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
if (!fi)
goto failure;
fib_info_cnt++;
if (cfg->fc_mx) {
fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
if (!fi->fib_metrics)
goto failure;
if (unlikely(!fi->fib_metrics)) {
kfree(fi);
return ERR_PTR(err);
}
atomic_set(&fi->fib_metrics->refcnt, 1);
} else
} else {
fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
}
fib_info_cnt++;
fi->fib_net = net;
fi->fib_protocol = cfg->fc_protocol;
fi->fib_scope = cfg->fc_scope;

View file

@ -1247,7 +1247,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
if (mtu)
return mtu;
mtu = dst->dev->mtu;
mtu = READ_ONCE(dst->dev->mtu);
if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
if (rt->rt_uses_gateway && mtu > 576)

View file

@ -3029,8 +3029,7 @@ void tcp_rearm_rto(struct sock *sk)
/* delta may not be positive if the socket is locked
* when the retrans timer fires and is rescheduled.
*/
if (delta > 0)
rto = delta;
rto = max(delta, 1);
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
TCP_RTO_MAX);

View file

@ -892,6 +892,8 @@ add:
}
nsiblings = iter->rt6i_nsiblings;
fib6_purge_rt(iter, fn, info->nl_net);
if (fn->rr_ptr == iter)
fn->rr_ptr = NULL;
rt6_release(iter);
if (nsiblings) {
@ -904,6 +906,8 @@ add:
if (rt6_qualify_for_ecmp(iter)) {
*ins = iter->dst.rt6_next;
fib6_purge_rt(iter, fn, info->nl_net);
if (fn->rr_ptr == iter)
fn->rr_ptr = NULL;
rt6_release(iter);
nsiblings--;
} else {
@ -992,7 +996,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
/* Create subtree root node */
sfn = node_alloc();
if (!sfn)
goto st_failure;
goto failure;
sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
@ -1008,12 +1012,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
if (IS_ERR(sn)) {
/* If it is failed, discard just allocated
root, and then (in st_failure) stale node
root, and then (in failure) stale node
in main tree.
*/
node_free(sfn);
err = PTR_ERR(sn);
goto st_failure;
goto failure;
}
/* Now link new subtree to main tree */
@ -1027,7 +1031,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
if (IS_ERR(sn)) {
err = PTR_ERR(sn);
goto st_failure;
goto failure;
}
}
@ -1069,22 +1073,22 @@ out:
atomic_inc(&pn->leaf->rt6i_ref);
}
#endif
if (!(rt->dst.flags & DST_NOCACHE))
dst_free(&rt->dst);
goto failure;
}
return err;
#ifdef CONFIG_IPV6_SUBTREES
/* Subtree creation failed, probably main tree node
is orphan. If it is, shoot it.
failure:
/* fn->leaf could be NULL if fn is an intermediate node and we
* failed to add the new route to it in both subtree creation
* failure and fib6_add_rt2node() failure case.
* In both cases, fib6_repair_tree() should be called to fix
* fn->leaf.
*/
st_failure:
if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
fib6_repair_tree(info->nl_net, fn);
if (!(rt->dst.flags & DST_NOCACHE))
dst_free(&rt->dst);
return err;
#endif
}
/*

View file

@ -2227,7 +2227,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
struct irda_device_list list;
struct irda_device_list list = { 0 };
struct irda_device_info *discoveries;
struct irda_ias_set * ias_opt; /* IAS get/query params */
struct ias_object * ias_obj; /* Object in IAS */
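
The `= { 0 }` matters because parts of this on-stack struct are later copied to userspace; unwritten fields or padding would otherwise leak kernel stack bytes. A small userspace demonstration of the zero-initialization idiom (the struct layout is illustrative):

    #include <stdio.h>
    #include <string.h>

    struct device_list {
        unsigned int len;
        char buf[16];
    };

    int main(void)
    {
        /* Fully zeroed: every byte that might be copied out is defined. */
        struct device_list safe = { 0 };

        /* Equivalent explicit form. */
        struct device_list also_safe;
        memset(&also_safe, 0, sizeof(also_safe));

        /* Copying 'safe' somewhere (to userspace, in the kernel case)
         * can now only ever reveal zeros, never stale stack bytes. */
        printf("len=%u first=%d\n", safe.len, safe.buf[0]);
        printf("memset'd len=%u\n", also_safe.len);
        return 0;
    }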

View file

@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
#define BROADCAST_ONE 1
#define BROADCAST_REGISTERED 2
#define BROADCAST_PROMISC_ONLY 4
static int pfkey_broadcast(struct sk_buff *skb,
static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
int broadcast_flags, struct sock *one_sk,
struct net *net)
{
@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
rcu_read_unlock();
if (one_sk != NULL)
err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
kfree_skb(skb2);
kfree_skb(skb);
@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
hdr = (struct sadb_msg *) pfk->dump.skb->data;
hdr->sadb_msg_seq = 0;
hdr->sadb_msg_errno = rc;
pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = NULL;
}
@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
sizeof(uint64_t));
pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
return 0;
}
@ -1396,7 +1396,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
xfrm_state_put(x);
pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
return 0;
}
@ -1483,7 +1483,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
return 0;
}
@ -1596,7 +1596,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
out_hdr->sadb_msg_reserved = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
return 0;
}
@ -1701,8 +1701,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
return -ENOBUFS;
}
pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
sock_net(sk));
return 0;
}
@ -1720,7 +1720,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
sock_net(sk));
}
static int key_notify_sa_flush(const struct km_event *c)
@ -1741,7 +1742,7 @@ static int key_notify_sa_flush(const struct km_event *c)
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
@ -1798,7 +1799,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb)
pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;
@ -1886,7 +1887,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb
new_hdr->sadb_msg_errno = 0;
}
pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
return 0;
}
@ -2219,7 +2220,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = c->seq;
out_hdr->sadb_msg_pid = c->portid;
pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
return 0;
}
@ -2439,7 +2440,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
err = 0;
out:
@ -2695,7 +2696,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb)
pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;
@ -2752,7 +2753,7 @@ static int key_notify_policy_flush(const struct km_event *c)
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
@ -2814,7 +2815,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
void *ext_hdrs[SADB_EXT_MAX];
int err;
pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
memset(ext_hdrs, 0, sizeof(ext_hdrs));
@ -3036,7 +3037,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
out_hdr->sadb_msg_seq = 0;
out_hdr->sadb_msg_pid = 0;
pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
xs_net(x));
return 0;
}
@ -3226,7 +3228,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
xfrm_ctx->ctx_len);
}
return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
xs_net(x));
}
static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
@ -3424,7 +3427,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
n_port->sadb_x_nat_t_port_port = sport;
n_port->sadb_x_nat_t_port_reserved = 0;
return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
xs_net(x));
}
#ifdef CONFIG_NET_KEY_MIGRATE
@ -3616,7 +3620,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
}
/* broadcast migrate message to sockets */
pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
return 0;
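
Every hunk in this file is the same mechanical change: pfkey_broadcast() gains a gfp_t so each call site declares whether it may sleep (GFP_KERNEL) or runs in atomic context (GFP_ATOMIC), rather than the helper hardcoding GFP_KERNEL. A shape sketch of threading an allocation-context flag through a helper (the enum stands in for the real gfp_t):

    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace stand-ins for the kernel's allocation contexts. */
    typedef enum { GFP_KERNEL_, GFP_ATOMIC_ } gfp_like_t;

    static void *alloc_ctx(size_t n, gfp_like_t gfp)
    {
        /* In the kernel, GFP_ATOMIC forbids sleeping; here we only log. */
        printf("alloc %zu bytes (%s)\n", n,
               gfp == GFP_ATOMIC_ ? "atomic: may not sleep" : "may sleep");
        return malloc(n);
    }

    /* The helper no longer assumes it may sleep: callers pass context in. */
    static int broadcast(const char *msg, gfp_like_t gfp)
    {
        void *copy = alloc_ctx(32, gfp);
        if (!copy)
            return -1;
        printf("broadcast: %s\n", msg);
        free(copy);
        return 0;
    }

    int main(void)
    {
        broadcast("from process context", GFP_KERNEL_); /* e.g. pfkey_error() */
        broadcast("from a notifier",      GFP_ATOMIC_); /* e.g. key_notify_sa() */
        return 0;
    }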

View file

@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
rcu_read_lock();
t = rcu_dereference(nf_ct_ext_types[id]);
BUG_ON(t == NULL);
if (!t) {
rcu_read_unlock();
return NULL;
}
off = ALIGN(sizeof(struct nf_ct_ext), t->align);
len = off + t->len + var_alloc_len;
alloc_size = t->alloc_size + var_alloc_len;
@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
rcu_read_lock();
t = rcu_dereference(nf_ct_ext_types[id]);
BUG_ON(t == NULL);
if (!t) {
rcu_read_unlock();
return NULL;
}
newoff = ALIGN(old->len, t->align);
newlen = newoff + t->len + var_alloc_len;
@ -186,6 +193,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
update_alloc_size(type);
mutex_unlock(&nf_ct_ext_type_mutex);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
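
Two hardening moves here: a BUG_ON() on a possibly-NULL RCU-protected pointer becomes an unlock-and-fail, and unregister switches to synchronize_rcu(), which waits for readers rather than for queued call_rcu() callbacks. A sketch of the graceful-lookup half only (RCU stubbed out; the real primitives live in <linux/rcupdate.h>):

    #include <stdio.h>
    #include <stddef.h>

    struct ext_type { int len; };

    /* Stand-ins for rcu_read_lock()/rcu_read_unlock(). */
    static void rcu_read_lock_(void)   { /* disables preemption in-kernel */ }
    static void rcu_read_unlock_(void) { }

    static struct ext_type *types[4];  /* registry; entries may be NULL */

    static struct ext_type *lookup(int id)
    {
        struct ext_type *t;

        rcu_read_lock_();
        t = types[id];          /* rcu_dereference() in the kernel */
        if (!t) {
            /* Graceful failure instead of BUG_ON(t == NULL): drop the
             * read lock and let the caller cope. */
            rcu_read_unlock_();
            return NULL;
        }
        /* ... use t under the read lock ... */
        rcu_read_unlock_();
        return t;
    }

    int main(void)
    {
        struct ext_type t0 = { .len = 8 };
        types[0] = &t0;
        printf("id 0: %p\n", (void *)lookup(0));
        printf("id 1: %p (unregistered, handled gracefully)\n",
               (void *)lookup(1));
        return 0;
    }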

View file

@ -1713,18 +1713,9 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
}
MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
if (sk != NULL) {
set_sk_callback_lock = true;
read_lock_bh(&sk->sk_callback_lock);
MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
par->hooknum, sk, sk->sk_socket,
sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
filp = sk->sk_socket ? sk->sk_socket->file : NULL;
MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
par->hooknum, filp ? from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
}
if (sk == NULL || sk->sk_socket == NULL) {
if (sk == NULL) {
/*
* Here, the qtaguid_find_sk() using connection tracking
* couldn't find the owner, so for now we just count them
@ -1732,9 +1723,7 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
*/
if (do_tag_stat)
account_for_uid(skb, sk, 0, par);
MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
par->hooknum,
sk ? sk->sk_socket : NULL);
MT_DEBUG("qtaguid[%d]: leaving (sk=NULL)\n", par->hooknum);
res = (info->match ^ info->invert) == 0;
atomic64_inc(&qtu_events.match_no_sk);
goto put_sock_ret_res;
@ -1742,17 +1731,7 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
res = false;
goto put_sock_ret_res;
}
filp = sk->sk_socket->file;
if (filp == NULL) {
MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
if (do_tag_stat)
account_for_uid(skb, sk, 0, par);
res = ((info->match ^ info->invert) &
(XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
atomic64_inc(&qtu_events.match_no_sk_file);
goto put_sock_ret_res;
}
sock_uid = filp->f_cred->fsuid;
sock_uid = sk->sk_uid;
if (do_tag_stat)
account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid), par);
@ -1766,8 +1745,8 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min);
kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max);
if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
uid_lte(filp->f_cred->fsuid, uid_max)) ^
if ((uid_gte(sk->sk_uid, uid_min) &&
uid_lte(sk->sk_uid, uid_max)) ^
!(info->invert & XT_QTAGUID_UID)) {
MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
par->hooknum);
@ -1778,7 +1757,19 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
if (info->match & XT_QTAGUID_GID) {
kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min);
kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max);
set_sk_callback_lock = true;
read_lock_bh(&sk->sk_callback_lock);
MT_DEBUG("qtaguid[%d]: sk=%pK->sk_socket=%pK->file=%pK\n",
par->hooknum, sk, sk->sk_socket,
sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
filp = sk->sk_socket ? sk->sk_socket->file : NULL;
if (!filp) {
res = ((info->match ^ info->invert) & XT_QTAGUID_GID) == 0;
atomic64_inc(&qtu_events.match_no_sk_gid);
goto put_sock_ret_res;
}
MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
par->hooknum, filp ? from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
gid_lte(filp->f_cred->fsgid, gid_max)) ^
!(info->invert & XT_QTAGUID_GID)) {
@ -1950,7 +1941,7 @@ static int qtaguid_ctrl_proc_show(struct seq_file *m, void *v)
"match_found_sk_in_ct=%llu "
"match_found_no_sk_in_ct=%llu "
"match_no_sk=%llu "
"match_no_sk_file=%llu\n",
"match_no_sk_gid=%llu\n",
(u64)atomic64_read(&qtu_events.sockets_tagged),
(u64)atomic64_read(&qtu_events.sockets_untagged),
(u64)atomic64_read(&qtu_events.counter_set_changes),
@ -1962,7 +1953,7 @@ static int qtaguid_ctrl_proc_show(struct seq_file *m, void *v)
(u64)atomic64_read(&qtu_events.match_found_sk_in_ct),
(u64)atomic64_read(&qtu_events.match_found_no_sk_in_ct),
(u64)atomic64_read(&qtu_events.match_no_sk),
(u64)atomic64_read(&qtu_events.match_no_sk_file));
(u64)atomic64_read(&qtu_events.match_no_sk_gid));
/* Count the following as part of the last item_index. No need
* to lock the sock_tag_list here since it is already locked when

View file

@ -289,10 +289,10 @@ struct qtaguid_event_counts {
*/
atomic64_t match_no_sk;
/*
* The file ptr in the sk_socket wasn't there.
* The file ptr in the sk_socket wasn't there and we couldn't get GID.
* This might happen for traffic while the socket is being closed.
*/
atomic64_t match_no_sk_file;
atomic64_t match_no_sk_gid;
};
/* Track the set active_set for the given tag. */

View file

@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
create_info = (struct hci_create_pipe_resp *)skb->data;
if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
status = NFC_HCI_ANY_E_NOK;
goto exit;
}
/* Save the new created pipe and bind with local gate,
* the description for skb->data[3] is destination gate id
* but since we received this cmd from host controller, we
@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
delete_info = (struct hci_delete_pipe_noti *)skb->data;
if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
status = NFC_HCI_ANY_E_NOK;
goto exit;
}
hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
break;
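
Both added checks are the same guard: a pipe index taken off the wire is validated against NFC_HCI_MAX_PIPES before indexing hdev->pipes[], turning a remotely triggerable out-of-bounds write into a clean protocol error. A minimal sketch of validate-before-index on untrusted input (constants and status codes are stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PIPES 128
    #define E_NOK (-1)

    struct pipe { int gate; int dest_host; };
    static struct pipe pipes[MAX_PIPES];

    /* 'pipe' arrives from the remote controller: never trust it. */
    static int delete_pipe(uint8_t pipe)
    {
        if (pipe >= MAX_PIPES)      /* reject before any array access */
            return E_NOK;

        pipes[pipe].gate = -1;      /* NFC_HCI_INVALID_GATE in the driver */
        pipes[pipe].dest_host = -1;
        return 0;
    }

    int main(void)
    {
        printf("pipe 5:   %d\n", delete_pipe(5));     /* ok */
        printf("pipe 200: %d\n", delete_pipe(200));   /* rejected */
        return 0;
    }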

View file

@ -34,6 +34,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int
{
struct xt_tgchk_param par;
struct xt_target *target;
struct ipt_entry e = {};
int ret = 0;
target = xt_request_find_target(AF_INET, t->u.user.name,
@ -44,6 +45,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int
t->u.kernel.target = target;
memset(&par, 0, sizeof(par));
par.table = table;
par.entryinfo = &e;
par.target = target;
par.targinfo = t->data;
par.hook_mask = hook;

View file

@ -434,6 +434,7 @@ congestion_drop:
qdisc_drop(head, sch);
slot_queue_add(slot, skb);
qdisc_tree_reduce_backlog(sch, 0, delta);
return NET_XMIT_CN;
}
@ -465,8 +466,10 @@ enqueue:
/* Return Congestion Notification only if we dropped a packet
* from this flow.
*/
if (qlen != slot->qlen)
if (qlen != slot->qlen) {
qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
return NET_XMIT_CN;
}
/* As we dropped a packet, better let upper stack know this */
qdisc_tree_reduce_backlog(sch, 1, dropped);

View file

@ -510,7 +510,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
{
addr->sa.sa_family = AF_INET6;
addr->v6.sin6_port = port;
addr->v6.sin6_flowinfo = 0;
addr->v6.sin6_addr = *saddr;
addr->v6.sin6_scope_id = 0;
}
/* Compare addresses exactly.

View file

@ -258,13 +258,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
arg = nlmsg_new(0, GFP_KERNEL);
if (!arg) {
kfree_skb(msg->rep);
msg->rep = NULL;
return -ENOMEM;
}
err = __tipc_nl_compat_dumpit(cmd, msg, arg);
if (err)
if (err) {
kfree_skb(msg->rep);
msg->rep = NULL;
}
kfree_skb(arg);
return err;
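
Both paths now pair kfree_skb(msg->rep) with msg->rep = NULL, so a later error path cannot free the same buffer twice. A sketch of the free-and-NULL idiom (free() standing in for kfree_skb(); the struct is illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct msg { void *rep; };

    static void drop_reply(struct msg *m)
    {
        free(m->rep);
        m->rep = NULL;    /* the crucial line: a second drop is a no-op */
    }

    int main(void)
    {
        struct msg m = { .rep = malloc(64) };

        drop_reply(&m);   /* error path #1 frees the reply */
        drop_reply(&m);   /* error path #2: free(NULL) is defined, so
                           * there is no double free */
        printf("rep=%p\n", m.rep);
        return 0;
    }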

View file

@ -1691,6 +1691,10 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
struct sk_buff *skb;
int err;
err = verify_policy_dir(dir);
if (err)
return ERR_PTR(err);
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return ERR_PTR(-ENOMEM);
@ -2216,6 +2220,10 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
int n = 0;
struct net *net = sock_net(skb->sk);
err = verify_policy_dir(pi->dir);
if (err)
return err;
if (attrs[XFRMA_MIGRATE] == NULL)
return -EINVAL;
@ -2331,6 +2339,11 @@ static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
{
struct net *net = &init_net;
struct sk_buff *skb;
int err;
err = verify_policy_dir(dir);
if (err)
return err;
skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
if (skb == NULL)
@ -2985,6 +2998,11 @@ out_free_skb:
static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
int err;
err = verify_policy_dir(dir);
if (err)
return err;
switch (c->event) {
case XFRM_MSG_NEWPOLICY:

View file

@ -1126,7 +1126,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
mutex_lock(&ue->card->user_ctl_lock);
change = ue->tlv_data_size != size;
if (!change)
change = memcmp(ue->tlv_data, new_data, size);
change = memcmp(ue->tlv_data, new_data, size) != 0;
kfree(ue->tlv_data);
ue->tlv_data = new_data;
ue->tlv_data_size = size;
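
memcmp() returns an arbitrary negative, zero, or positive int, so the explicit `!= 0` both documents the intent and protects against truncation if `change` were ever a narrow type. A tiny, admittedly contrived illustration:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char a[4] = { 0, 0, 1, 0 };
        char b[4] = { 0, 0, 0, 0 };

        int raw = memcmp(a, b, sizeof(a));   /* nonzero; only the sign
                                              * is meaningful */

        /* Hazard: assigning the raw result to a 1-byte flag could
         * truncate some nonzero values to 0 on some implementations;
         * the explicit comparison never can. */
        bool change = memcmp(a, b, sizeof(a)) != 0;

        printf("raw=%d change=%d\n", raw, change);
        return 0;
    }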

View file

@ -1530,19 +1530,14 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client,
void __user *arg)
{
struct snd_seq_queue_info info;
int result;
struct snd_seq_queue *q;
if (copy_from_user(&info, arg, sizeof(info)))
return -EFAULT;
result = snd_seq_queue_alloc(client->number, info.locked, info.flags);
if (result < 0)
return result;
q = queueptr(result);
if (q == NULL)
return -EINVAL;
q = snd_seq_queue_alloc(client->number, info.locked, info.flags);
if (IS_ERR(q))
return PTR_ERR(q);
info.queue = q->queue;
info.locked = q->locked;
@ -1552,7 +1547,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client,
if (! info.name[0])
snprintf(info.name, sizeof(info.name), "Queue-%d", q->queue);
strlcpy(q->name, info.name, sizeof(q->name));
queuefree(q);
snd_use_lock_free(&q->use_lock);
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
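
Returning the queue via ERR_PTR()/IS_ERR()/PTR_ERR() (see the snd_seq_queue_alloc() hunk below) removes the old index-then-lookup window in which the queue could disappear between the two calls. A userspace re-creation of the kernel's error-pointer idiom (helpers written in the spirit of <linux/err.h>):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal err.h-style helpers: encode a small -errno in a pointer. */
    #define MAX_ERRNO 4095
    static void *ERR_PTR(long err)      { return (void *)err; }
    static long  PTR_ERR(const void *p) { return (long)p; }
    static bool  IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    struct queue { int id; };

    static struct queue *queue_alloc(bool fail)
    {
        if (fail)
            return ERR_PTR(-ENOMEM);   /* the error travels in the pointer */
        struct queue *q = malloc(sizeof(*q));
        if (!q)
            return ERR_PTR(-ENOMEM);
        q->id = 42;
        return q;                      /* caller gets the object directly */
    }

    int main(void)
    {
        struct queue *q = queue_alloc(false);
        if (IS_ERR(q))
            return (int)-PTR_ERR(q);
        printf("queue %d, no second lookup needed\n", q->id);
        free(q);

        q = queue_alloc(true);
        if (IS_ERR(q))
            printf("alloc failed: %ld\n", PTR_ERR(q));
        return 0;
    }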

View file

@ -184,22 +184,26 @@ void __exit snd_seq_queues_delete(void)
static void queue_use(struct snd_seq_queue *queue, int client, int use);
/* allocate a new queue -
* return queue index value or negative value for error
* return pointer to new queue or ERR_PTR(-errno) for error
* The new queue's use_lock is set to 1. It is the caller's responsibility to
* call snd_use_lock_free(&q->use_lock).
*/
int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
{
struct snd_seq_queue *q;
q = queue_new(client, locked);
if (q == NULL)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
q->info_flags = info_flags;
queue_use(q, client, 1);
snd_use_lock_use(&q->use_lock);
if (queue_list_add(q) < 0) {
snd_use_lock_free(&q->use_lock);
queue_delete(q);
return -ENOMEM;
return ERR_PTR(-ENOMEM);
}
return q->queue;
return q;
}
/* delete a queue - queue must be owned by the client */

Some files were not shown because too many files have changed in this diff.