Merge "Merge android-4.4.139 (7ba5557) into msm-4.4"

Linux Build Service Account (2018-07-10 22:43:57 -07:00), committed by Gerrit - the friendly Code Review server
commit 4049db73f1
110 changed files with 717 additions and 273 deletions

View file

@ -273,11 +273,10 @@ struct clk:
%pC pll1
%pCn pll1
%pCr 1560000000
For printing struct clk structures. '%pC' and '%pCn' print the name
(Common Clock Framework) or address (legacy clock framework) of the
structure; '%pCr' prints the current clock rate.
structure.
Passed by reference.
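
Editor's note: a minimal usage sketch for the two specifiers that stay documented after this hunk ('%pCr' is what the change drops). The function and variable names below are illustrative, not part of the patch.

#include <linux/clk.h>
#include <linux/printk.h>

/* Hedged sketch: print a clock via the remaining specifiers. */
static void report_clock(struct clk *my_clk)
{
	/* %pC and %pCn print the clock's name under the common clock
	 * framework (e.g. "pll1"), or its address on the legacy framework. */
	pr_info("switching to %pC\n", my_clk);
	pr_info("same clock via %%pCn: %pCn\n", my_clk);
}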

View file

@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 138
SUBLEVEL = 139
EXTRAVERSION =
NAME = Blurry Fish Butt

View file

@ -76,7 +76,7 @@ extern int kgdb_fault_expected;
#define KGDB_MAX_NO_CPUS 1
#define BUFMAX 400
#define NUMREGBYTES (DBG_MAX_REG_NUM << 2)
#define NUMREGBYTES (GDB_MAX_REGS << 2)
#define NUMCRITREGBYTES (32 << 2)
#define _R0 0

View file

@ -88,7 +88,8 @@ static inline void free_io_area(void *addr)
for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
if (tmp->addr == addr) {
*p = tmp->next;
__iounmap(tmp->addr, tmp->size);
/* remove gap added in get_io_area() */
__iounmap(tmp->addr, tmp->size - IO_SIZE);
kfree(tmp);
return;
}

View file

@ -249,6 +249,12 @@ static int __init bcm47xx_cpu_fixes(void)
*/
if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
cpu_wait = NULL;
/*
* BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
* Enable ExternalSync for sync instruction to take effect
*/
set_c0_config7(MIPS_CONF7_ES);
break;
#endif
}

View file

@ -411,6 +411,8 @@ static inline type pfx##in##bwlq##p(unsigned long port) \
__val = *__addr; \
slow; \
\
/* prevent prefetching of coherent DMA data prematurely */ \
rmb(); \
return pfx##ioswab##bwlq(__addr, __val); \
}
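
Editor's note: a hedged sketch of the driver pattern this barrier protects, assuming a descriptor in coherent DMA memory whose completion is signalled through a port register; the struct, bit mask and names are made up for illustration.

#include <linux/io.h>
#include <linux/types.h>
#include <linux/errno.h>

struct rx_desc {			/* hypothetical descriptor in coherent DMA memory */
	u32 status;
	u32 len;
};

static int poll_rx(unsigned long port, struct rx_desc *desc)
{
	u32 hw_status = inl(port);	/* accessor patched above */

	if (!(hw_status & 0x1))		/* made-up "frame complete" bit */
		return -EAGAIN;

	/* with rmb() inside inl(), this load can no longer be speculated
	 * ahead of the status read on a weakly ordered core */
	return desc->len;
}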

View file

@ -606,6 +606,8 @@
#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
/* ExternalSync */
#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
@ -2013,6 +2015,7 @@ __BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
__BUILD_SET_C0(config5)
__BUILD_SET_C0(config7)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)

View file

@ -116,10 +116,20 @@ ftrace_stub:
NESTED(_mcount, PT_SIZE, ra)
PTR_LA t1, ftrace_stub
PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
bne t1, t2, static_trace
beq t1, t2, fgraph_trace
nop
MCOUNT_SAVE_REGS
move a0, ra /* arg1: self return address */
jalr t2 /* (1) call *ftrace_trace_function */
move a1, AT /* arg2: parent's return address */
MCOUNT_RESTORE_REGS
fgraph_trace:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
PTR_LA t1, ftrace_stub
PTR_L t3, ftrace_graph_return
bne t1, t3, ftrace_graph_caller
nop
@ -128,24 +138,11 @@ NESTED(_mcount, PT_SIZE, ra)
bne t1, t3, ftrace_graph_caller
nop
#endif
b ftrace_stub
#ifdef CONFIG_32BIT
addiu sp, sp, 8
#else
nop
#endif
static_trace:
MCOUNT_SAVE_REGS
move a0, ra /* arg1: self return address */
jalr t2 /* (1) call *ftrace_trace_function */
move a1, AT /* arg2: parent's return address */
MCOUNT_RESTORE_REGS
#ifdef CONFIG_32BIT
addiu sp, sp, 8
#endif
.globl ftrace_stub
ftrace_stub:
RETURN_BACK

View file

@ -574,6 +574,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
* actually hit this code path.
*/
isync
slbie r6
slbie r6 /* Workaround POWER5 < DD2.1 issue */
slbmte r7,r0

View file

@ -1025,6 +1025,9 @@ void fadump_cleanup(void)
init_fadump_mem_struct(&fdm,
be64_to_cpu(fdm_active->cpu_state_data.destination_address));
fadump_invalidate_dump(&fdm);
} else if (fw_dump.dump_registered) {
/* Un-register Firmware-assisted dump if it was registered. */
fadump_unregister_dump(&fdm);
}
}

View file

@ -174,8 +174,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
if (cpu_has_feature(CPU_FTR_DAWR)) {
length_max = 512 ; /* 64 doublewords */
/* DAWR region can't cross 512 boundary */
if ((bp->attr.bp_addr >> 10) !=
((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
if ((bp->attr.bp_addr >> 9) !=
((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
return -EINVAL;
}
if (info->len >

View file

@ -1004,6 +1004,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
/* Create a new breakpoint request if one doesn't exist already */
hw_breakpoint_init(&attr);
attr.bp_addr = hw_brk.address;
attr.bp_len = 8;
arch_bp_generic_fields(hw_brk.type,
&attr.bp_type);

View file

@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
{
unsigned long mask;
asm ("cmp %1,%2; sbb %0,%0;"
asm volatile ("cmp %1,%2; sbb %0,%0;"
:"=r" (mask)
:"r"(size),"r" (index)
:"cc");

View file

@ -309,7 +309,7 @@ do_unaligned_user (struct pt_regs *regs)
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void *) regs->excvaddr;
force_sig_info(SIGSEGV, &info, current);
force_sig_info(SIGBUS, &info, current);
}
#endif

View file

@ -4247,9 +4247,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM, },
/* Sandisk devices which are known to not handle LPM well */
{ "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },

View file

@ -34,7 +34,7 @@ struct zpodd {
static int eject_tray(struct ata_device *dev)
{
struct ata_taskfile tf;
const char cdb[] = { GPCMD_START_STOP_UNIT,
static const char cdb[ATAPI_CDB_LEN] = { GPCMD_START_STOP_UNIT,
0, 0, 0,
0x02, /* LoEj */
0, 0, 0, 0, 0, 0, 0,
@ -55,7 +55,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
unsigned int ret;
struct rm_feature_desc *desc = (void *)(buf + 8);
struct ata_taskfile tf;
char cdb[] = { GPCMD_GET_CONFIGURATION,
static const char cdb[] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */

View file

@ -1149,8 +1149,8 @@ static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
}
static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd,
int offset, int swap)
static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
int swap)
{
unsigned char buf[ZEPROM_SIZE];
struct zatm_dev *zatm_dev;

View file

@ -764,7 +764,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
dir = kzalloc(sizeof(*dir), GFP_KERNEL);
if (!dir)
return NULL;
return ERR_PTR(-ENOMEM);
dir->class = class;
kobject_init(&dir->kobj, &class_dir_ktype);
@ -774,7 +774,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
if (retval < 0) {
kobject_put(&dir->kobj);
return NULL;
return ERR_PTR(retval);
}
return &dir->kobj;
}
@ -1081,6 +1081,10 @@ int device_add(struct device *dev)
parent = get_device(dev->parent);
kobj = get_device_parent(dev, parent);
if (IS_ERR(kobj)) {
error = PTR_ERR(kobj);
goto parent_error;
}
if (kobj)
dev->kobj.parent = kobj;
@ -1179,6 +1183,7 @@ done:
kobject_del(&dev->kobj);
Error:
cleanup_glue_dir(dev, glue_dir);
parent_error:
put_device(parent);
name_error:
kfree(dev->p);
@ -1995,6 +2000,11 @@ int device_move(struct device *dev, struct device *new_parent,
device_pm_lock();
new_parent = get_device(new_parent);
new_parent_kobj = get_device_parent(dev, new_parent);
if (IS_ERR(new_parent_kobj)) {
error = PTR_ERR(new_parent_kobj);
put_device(new_parent);
goto out;
}
pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
__func__, new_parent ? dev_name(new_parent) : "<NULL>");

View file

@ -939,6 +939,12 @@ static int qca_setup(struct hci_uart *hu)
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
ret = 0;
} else if (ret == -EAGAIN) {
/*
* Userspace firmware loader will return -EAGAIN in case no
* patch/nvm-config is found, so run with original fw/config.
*/
ret = 0;
}
/* Setup bdaddr */

View file

@ -522,11 +522,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
BT_CONTROL(BT_H_BUSY);
bt->timeout = bt->BT_CAP_req2rsp;
/* Read BT capabilities if it hasn't been done yet */
if (!bt->BT_CAP_outreqs)
BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
SI_SM_CALL_WITHOUT_DELAY);
bt->timeout = bt->BT_CAP_req2rsp;
BT_SI_SM_RETURN(SI_SM_IDLE);
case BT_STATE_XACTION_START:

View file

@ -709,6 +709,8 @@ static ssize_t store_##file_name \
struct cpufreq_policy new_policy; \
\
memcpy(&new_policy, policy, sizeof(*policy)); \
new_policy.min = policy->user_policy.min; \
new_policy.max = policy->user_policy.max; \
\
ret = sscanf(buf, "%u", &new_policy.object); \
if (ret != 1) \

View file

@ -29,9 +29,31 @@ struct cpuidle_driver powernv_idle_driver = {
static int max_idle_state;
static struct cpuidle_state *cpuidle_state_table;
static u64 snooze_timeout;
static u64 default_snooze_timeout;
static bool snooze_timeout_en;
static u64 get_snooze_timeout(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
int i;
if (unlikely(!snooze_timeout_en))
return default_snooze_timeout;
for (i = index + 1; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
struct cpuidle_state_usage *su = &dev->states_usage[i];
if (s->disabled || su->disable)
continue;
return s->target_residency * tb_ticks_per_usec;
}
return default_snooze_timeout;
}
static int snooze_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
@ -41,7 +63,7 @@ static int snooze_loop(struct cpuidle_device *dev,
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
snooze_exit_time = get_tb() + snooze_timeout;
snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
ppc64_runlatch_off();
while (!need_resched()) {
HMT_low();
@ -286,11 +308,9 @@ static int powernv_idle_probe(void)
cpuidle_state_table = powernv_states;
/* Device tree can indicate more idle states */
max_idle_state = powernv_add_idle_states();
if (max_idle_state > 1) {
default_snooze_timeout = TICK_USEC * tb_ticks_per_usec;
if (max_idle_state > 1)
snooze_timeout_en = true;
snooze_timeout = powernv_states[1].target_residency *
tb_ticks_per_usec;
}
} else
return -ENODEV;

View file

@ -19,7 +19,7 @@ struct iio_kfifo {
#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
int bytes_per_datum, int length)
size_t bytes_per_datum, unsigned int length)
{
if ((length == 0) || (bytes_per_datum == 0))
return -EINVAL;
@ -71,7 +71,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
return 0;
}
static int iio_set_length_kfifo(struct iio_buffer *r, int length)
static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
{
/* Avoid an invalid state */
if (length < 2)

View file

@ -1780,7 +1780,6 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
"buf:%lld\n", wc.wr_id);
break;
default:
BUG_ON(1);
break;
}
} else {

View file

@ -1451,8 +1451,7 @@ u64 qib_sps_ints(void);
/*
* dma_addr wrappers - all 0's invalid for hw
*/
dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
size_t, int);
int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
const char *qib_get_unit_name(int unit);
/*

View file

@ -364,6 +364,8 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
goto done;
}
for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
dma_addr_t daddr;
for (; ntids--; tid++) {
if (tid == tidcnt)
tid = 0;
@ -380,12 +382,14 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
ret = -ENOMEM;
break;
}
ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
if (ret)
break;
tidlist[i] = tid + tidoff;
/* we "know" system pages and TID pages are same size */
dd->pageshadow[ctxttid + tid] = pagep[i];
dd->physshadow[ctxttid + tid] =
qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
dd->physshadow[ctxttid + tid] = daddr;
/*
* don't need atomic or it's overhead
*/

View file

@ -98,23 +98,27 @@ bail:
*
* I'm sure we won't be so lucky with other iommu's, so FIXME.
*/
dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
unsigned long offset, size_t size, int direction)
int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
{
dma_addr_t phys;
phys = pci_map_page(hwdev, page, offset, size, direction);
phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(hwdev, phys))
return -ENOMEM;
if (phys == 0) {
pci_unmap_page(hwdev, phys, size, direction);
phys = pci_map_page(hwdev, page, offset, size, direction);
if (!phys) {
pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(hwdev, phys))
return -ENOMEM;
/*
* FIXME: If we get 0 again, we should keep this page,
* map another, then free the 0 page.
*/
}
return phys;
*daddr = phys;
return 0;
}
/**

View file

@ -27,6 +27,8 @@
#define ETP_DISABLE_POWER 0x0001
#define ETP_PRESSURE_OFFSET 25
#define ETP_CALIBRATE_MAX_LEN 3
/* IAP Firmware handling */
#define ETP_PRODUCT_ID_FORMAT_STRING "%d.0"
#define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"

View file

@ -595,7 +595,7 @@ static ssize_t calibrate_store(struct device *dev,
int tries = 20;
int retval;
int error;
u8 val[3];
u8 val[ETP_CALIBRATE_MAX_LEN];
retval = mutex_lock_interruptible(&data->sysfs_mutex);
if (retval)
@ -1250,6 +1250,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN060C", 0 },
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
{ "ELAN0618", 0 },
{ "ELAN1000", 0 },
{ }
};

View file

@ -56,7 +56,7 @@
static int elan_smbus_initialize(struct i2c_client *client)
{
u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
int len, error;
/* Get hello packet */
@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
{
int error;
u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
error = i2c_smbus_read_block_data(client,
ETP_SMBUS_CALIBRATE_QUERY, val);
ETP_SMBUS_CALIBRATE_QUERY, buf);
if (error < 0)
return error;
memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
return 0;
}
@ -466,6 +470,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
{
int len;
BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
len = i2c_smbus_read_block_data(client,
ETP_SMBUS_PACKET_QUERY,
&report[ETP_SMBUS_REPORT_OFFSET]);

View file

@ -804,7 +804,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
else if (ic_version == 7 && etd->samples[1] == 0x2A)
sanity_check = ((packet[3] & 0x1c) == 0x10);
else
sanity_check = ((packet[0] & 0x0c) == 0x04 &&
sanity_check = ((packet[0] & 0x08) == 0x00 &&
(packet[3] & 0x1c) == 0x10);
if (!sanity_check)
@ -1177,6 +1177,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
{ }
};
static const char * const middle_button_pnp_ids[] = {
"LEN2131", /* ThinkPad P52 w/ NFC */
"LEN2132", /* ThinkPad P52 */
NULL
};
/*
* Set the appropriate event bits for the input subsystem
*/
@ -1196,7 +1202,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
__clear_bit(EV_REL, dev->evbit);
__set_bit(BTN_LEFT, dev->keybit);
if (dmi_check_system(elantech_dmi_has_middle_button))
if (dmi_check_system(elantech_dmi_has_middle_button) ||
psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
__set_bit(BTN_MIDDLE, dev->keybit);
__set_bit(BTN_RIGHT, dev->keybit);

View file

@ -562,4 +562,24 @@ config DM_ANDROID_VERITY
of the metadata contents are verified against the key included
in the system keyring. Upon success, the underlying verity
target is setup.
config DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED
bool "Verity will validate blocks at most once"
depends on DM_VERITY
---help---
Enables the at_most_once option for dm-verity by default.
Verify data blocks only the first time they are read from the
data device, rather than every time. This reduces the overhead
of dm-verity so that it can be used on systems that are memory
and/or CPU constrained. However, it provides a reduced level
of security because only offline tampering of the data device's
content will be detected, not online tampering.
Hash blocks are still verified each time they are read from the
hash device, since verification of hash blocks is less performance
critical than data blocks, and a hash block will not be verified
any more after all the data blocks it covers have been verified anyway.
If unsure, say N.
endif # MD

View file

@ -1299,6 +1299,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
static void requeue_bios(struct pool *pool);
static void check_for_space(struct pool *pool)
{
int r;
@ -1311,8 +1313,10 @@ static void check_for_space(struct pool *pool)
if (r)
return;
if (nr_free)
if (nr_free) {
set_pool_mode(pool, PM_WRITE);
requeue_bios(pool);
}
}
/*
@ -1389,7 +1393,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
r = dm_pool_alloc_data_block(pool->pmd, result);
if (r) {
metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
if (r == -ENOSPC)
set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
else
metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
return r;
}

View file

@ -1053,6 +1053,14 @@ int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
#ifdef CONFIG_DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED
if (!v->validated_blocks) {
r = verity_alloc_most_once(v);
if (r)
goto bad;
}
#endif
v->hash_per_block_bits =
__fls((1 << v->hash_dev_block_bits) / v->digest_size);

View file

@ -2690,7 +2690,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
}
} else if (cmd_match(buf, "re-add")) {
if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
rdev->saved_raid_disk >= 0) {
/* clear_bit is performed _after_ all the devices
* have their local Faulty bit cleared. If any writes
* happen in the meantime in the local node, they
@ -8153,6 +8154,7 @@ static int remove_and_add_spares(struct mddev *mddev,
if (mddev->pers->hot_remove_disk(
mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->saved_raid_disk = rdev->raid_disk;
rdev->raid_disk = -1;
removed++;
}

View file

@ -230,8 +230,20 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe,
wake_up_interruptible (&events->wait_queue);
}
static int dvb_frontend_test_event(struct dvb_frontend_private *fepriv,
struct dvb_fe_events *events)
{
int ret;
up(&fepriv->sem);
ret = events->eventw != events->eventr;
down(&fepriv->sem);
return ret;
}
static int dvb_frontend_get_event(struct dvb_frontend *fe,
struct dvb_frontend_event *event, int flags)
struct dvb_frontend_event *event, int flags)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
struct dvb_fe_events *events = &fepriv->events;
@ -249,13 +261,8 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
if (flags & O_NONBLOCK)
return -EWOULDBLOCK;
up(&fepriv->sem);
ret = wait_event_interruptible (events->wait_queue,
events->eventw != events->eventr);
if (down_interruptible (&fepriv->sem))
return -ERESTARTSYS;
ret = wait_event_interruptible(events->wait_queue,
dvb_frontend_test_event(fepriv, events));
if (ret < 0)
return ret;

View file

@ -864,6 +864,9 @@ struct usb_device_id cx231xx_id_table[] = {
.driver_info = CX231XX_BOARD_CNXT_RDE_250},
{USB_DEVICE(0x0572, 0x58A0),
.driver_info = CX231XX_BOARD_CNXT_RDU_250},
/* AverMedia DVD EZMaker 7 */
{USB_DEVICE(0x07ca, 0xc039),
.driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER},
{USB_DEVICE(0x2040, 0xb110),
.driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL},
{USB_DEVICE(0x2040, 0xb111),

View file

@ -870,7 +870,7 @@ static int put_v4l2_ext_controls32(struct file *file,
get_user(kcontrols, &kp->controls))
return -EFAULT;
if (!count)
if (!count || count > (U32_MAX/sizeof(*ucontrols)))
return 0;
if (get_user(p, &up->controls))
return -EFAULT;

View file

@ -269,11 +269,11 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
intel_lpss_deassert_reset(lpss);
intel_lpss_set_remap_addr(lpss);
if (!intel_lpss_has_idma(lpss))
return;
intel_lpss_set_remap_addr(lpss);
/* Make sure that SPI multiblock DMA transfers are re-enabled */
if (lpss->type == LPSS_DEV_SPI)
writel(value, lpss->priv + LPSS_PRIV_SSP_REG);

View file

@ -1878,7 +1878,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
if (time_after(jiffies, timeo) && !chip_ready(map, adr))
break;
if (chip_ready(map, adr)) {
if (chip_good(map, adr, datum)) {
xip_enable(map, chip, adr);
goto op_done;
}
@ -2533,7 +2533,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct ppb_lock {
struct flchip *chip;
loff_t offset;
unsigned long adr;
int locked;
};
@ -2551,8 +2551,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
unsigned long timeo;
int ret;
adr += chip->start;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
@ -2570,8 +2571,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
chip->state = FL_LOCKING;
map_write(map, CMD(0xA0), chip->start + adr);
map_write(map, CMD(0x00), chip->start + adr);
map_write(map, CMD(0xA0), adr);
map_write(map, CMD(0x00), adr);
} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
/*
* Unlocking of one specific sector is not supported, so we
@ -2609,7 +2610,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
map_write(map, CMD(0x00), chip->start);
chip->state = FL_READY;
put_chip(map, chip, adr + chip->start);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
@ -2666,9 +2667,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
* sectors shall be unlocked, so lets keep their locking
* status at "unlocked" (locked=0) for the final re-locking.
*/
if ((adr < ofs) || (adr >= (ofs + len))) {
if ((offset < ofs) || (offset >= (ofs + len))) {
sect[sectors].chip = &cfi->chips[chipnum];
sect[sectors].offset = offset;
sect[sectors].adr = adr;
sect[sectors].locked = do_ppb_xxlock(
map, &cfi->chips[chipnum], adr, 0,
DO_XXLOCK_ONEBLOCK_GETLOCK);
@ -2682,6 +2683,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
i++;
if (adr >> cfi->chipshift) {
if (offset >= (ofs + len))
break;
adr = 0;
chipnum++;
@ -2712,7 +2715,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
*/
for (i = 0; i < sectors; i++) {
if (sect[i].locked)
do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
DO_XXLOCK_ONEBLOCK_LOCK);
}

View file

@ -1194,6 +1194,9 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
*/
get_device(&ubi->dev);
#ifdef CONFIG_MTD_UBI_FASTMAP
cancel_work_sync(&ubi->fm_work);
#endif
ubi_debugfs_exit_dev(ubi);
uif_close(ubi);

View file

@ -1729,6 +1729,7 @@ int ubi_thread(void *u)
}
dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
ubi->thread_enabled = 0;
return 0;
}
@ -1738,9 +1739,6 @@ int ubi_thread(void *u)
*/
static void shutdown_work(struct ubi_device *ubi)
{
#ifdef CONFIG_MTD_UBI_FASTMAP
flush_work(&ubi->fm_work);
#endif
while (!list_empty(&ubi->works)) {
struct ubi_work *wrk;

View file

@ -1115,6 +1115,7 @@ static int bond_option_primary_set(struct bonding *bond,
slave->dev->name);
rcu_assign_pointer(bond->primary_slave, slave);
strcpy(bond->params.primary, slave->dev->name);
bond->force_primary = true;
bond_select_active_slave(bond);
goto out;
}

View file

@ -71,7 +71,7 @@ static int sonic_open(struct net_device *dev)
for (i = 0; i < SONIC_NUM_RRS; i++) {
dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
SONIC_RBSIZE, DMA_FROM_DEVICE);
if (!laddr) {
if (dma_mapping_error(lp->device, laddr)) {
while(i > 0) { /* free any that were mapped successfully */
i--;
dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
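
Editor's note: this hunk and the qib_map_page() rework earlier in the commit converge on the same rule: a DMA address of 0 can be valid, so mapping failures must be detected with dma_mapping_error() (or the pci_ variant). A minimal sketch of the idiom, with an illustrative helper name:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_rx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, addr))	/* never "if (!addr)" */
		return -ENOMEM;

	*out = addr;
	return 0;
}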

View file

@ -1075,7 +1075,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* accordingly. Otherwise, we should check here.
*/
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
delayed_ndp_size = ctx->max_ndp_size;
delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
else
delayed_ndp_size = 0;
@ -1208,7 +1208,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* If requested, put NDP at end of frame. */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max);
cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max - ctx->max_ndp_size);
nth16->wNdpIndex = cpu_to_le16(skb_out->len);
memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size);

View file

@ -635,6 +635,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
{QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
{QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */

View file

@ -237,14 +237,18 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
{
struct device *dev = disk->driverfs_dev;
struct nd_region *nd_region = to_nd_region(dev->parent);
const char *pol = nd_region->ro ? "only" : "write";
int disk_ro = get_disk_ro(disk);
if (nd_region->ro == get_disk_ro(disk))
/*
* Upgrade to read-only if the region is read-only preserve as
* read-only if the disk is already read-only.
*/
if (disk_ro || nd_region->ro == disk_ro)
return 0;
dev_info(dev, "%s read-%s, marking %s read-%s\n",
dev_name(&nd_region->dev), pol, disk->disk_name, pol);
set_disk_ro(disk, nd_region->ro);
dev_info(dev, "%s read-only, marking %s read-only\n",
dev_name(&nd_region->dev), disk->disk_name);
set_disk_ro(disk, 1);
return 0;

View file

@ -156,20 +156,20 @@ static void __init of_unittest_dynamic(void)
/* Add a new property - should pass*/
prop->name = "new-property";
prop->value = "new-property-data";
prop->length = strlen(prop->value);
prop->length = strlen(prop->value) + 1;
unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
/* Try to add an existing property - should fail */
prop++;
prop->name = "new-property";
prop->value = "new-property-data-should-fail";
prop->length = strlen(prop->value);
prop->length = strlen(prop->value) + 1;
unittest(of_add_property(np, prop) != 0,
"Adding an existing property should have failed\n");
/* Try to modify an existing property - should pass */
prop->value = "modify-property-data-should-pass";
prop->length = strlen(prop->value);
prop->length = strlen(prop->value) + 1;
unittest(of_update_property(np, prop) == 0,
"Updating an existing property should have passed\n");
@ -177,7 +177,7 @@ static void __init of_unittest_dynamic(void)
prop++;
prop->name = "modify-property";
prop->value = "modify-missing-property-data-should-pass";
prop->length = strlen(prop->value);
prop->length = strlen(prop->value) + 1;
unittest(of_update_property(np, prop) == 0,
"Updating a missing property should have passed\n");

View file

@ -134,7 +134,7 @@ struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
int pciehp_enable_slot(struct slot *p_slot);
int pciehp_disable_slot(struct slot *p_slot);
void pcie_enable_notification(struct controller *ctrl);
void pcie_reenable_notification(struct controller *ctrl);
int pciehp_power_on_slot(struct slot *slot);
void pciehp_power_off_slot(struct slot *slot);
void pciehp_get_power_status(struct slot *slot, u8 *status);

View file

@ -295,7 +295,7 @@ static int pciehp_resume(struct pcie_device *dev)
ctrl = get_service_data(dev);
/* reinitialize the chipset's event detection logic */
pcie_enable_notification(ctrl);
pcie_reenable_notification(ctrl);
slot = ctrl->slot;

View file

@ -628,7 +628,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
void pcie_enable_notification(struct controller *ctrl)
static void pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
@ -666,6 +666,17 @@ void pcie_enable_notification(struct controller *ctrl)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
void pcie_reenable_notification(struct controller *ctrl)
{
/*
* Clear both Presence and Data Link Layer Changed to make sure
* those events still fire after we have re-enabled them.
*/
pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
pcie_enable_notification(ctrl);
}
static void pcie_disable_notification(struct controller *ctrl)
{
u16 mask;

View file

@ -625,6 +625,46 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
/**
* zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
* @tag: Identifier for event.
* @adapter: Pointer to zfcp adapter as context for this event.
* @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
* @ret: Return value of calling function.
*
* This SCSI trace variant does not depend on any of:
* scsi_cmnd, zfcp_fsf_req, scsi_device.
*/
void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
unsigned int scsi_id, int ret)
{
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
unsigned long flags;
static int const level = 1;
if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
return;
spin_lock_irqsave(&dbf->scsi_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_SCSI_CMND;
rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
rec->scsi_retries = ~0;
rec->scsi_allowed = ~0;
rec->fcp_rsp_info = ~0;
rec->scsi_id = scsi_id;
rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
rec->host_scribble = ~0;
memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
debug_event(dbf->scsi, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
struct debug_info *d;

View file

@ -34,11 +34,28 @@ enum zfcp_erp_steps {
ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
};
/**
* enum zfcp_erp_act_type - Type of ERP action object.
* @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
* @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
* @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
* @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
* @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
* either of the first four enum values.
* Used to indicate that an ERP action could not be
* set up despite a detected need for some recovery.
* @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
* either of the first four enum values.
* Used to indicate that ERP not needed because
* the object has ZFCP_STATUS_COMMON_ERP_FAILED.
*/
enum zfcp_erp_act_type {
ZFCP_ERP_ACTION_REOPEN_LUN = 1,
ZFCP_ERP_ACTION_REOPEN_PORT = 2,
ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
ZFCP_ERP_ACTION_NONE = 0xc0,
ZFCP_ERP_ACTION_FAILED = 0xe0,
};
enum zfcp_erp_act_state {
@ -125,6 +142,49 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
}
}
static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
{
int need = want;
struct zfcp_scsi_dev *zsdev;
switch (want) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zsdev = sdev_to_zfcp(sdev);
if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
need = 0;
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
need = 0;
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
if (atomic_read(&port->status) &
ZFCP_STATUS_COMMON_ERP_FAILED) {
need = 0;
/* ensure propagation of failed status to new devices */
zfcp_erp_set_port_status(
port, ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
if (atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_ERP_FAILED) {
need = 0;
/* ensure propagation of failed status to new devices */
zfcp_erp_set_adapter_status(
adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
default:
need = 0;
break;
}
return need;
}
static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
@ -248,16 +308,27 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
int retval = 1, need;
struct zfcp_erp_action *act;
if (!adapter->erp_thread)
return -EIO;
need = zfcp_erp_handle_failed(want, adapter, port, sdev);
if (!need) {
need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */
goto out;
}
if (!adapter->erp_thread) {
need = ZFCP_ERP_ACTION_NONE; /* marker for trace */
retval = -EIO;
goto out;
}
need = zfcp_erp_required_act(want, adapter, port, sdev);
if (!need)
goto out;
act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
if (!act)
if (!act) {
need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */
goto out;
}
atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
++adapter->erp_total_count;
list_add_tail(&act->list, &adapter->erp_ready_head);
@ -268,18 +339,32 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
return retval;
}
void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
u64 port_name, u32 port_id)
{
unsigned long flags;
static /* don't waste stack */ struct zfcp_port tmpport;
write_lock_irqsave(&adapter->erp_lock, flags);
/* Stand-in zfcp port with fields just good enough for
* zfcp_dbf_rec_trig() and zfcp_dbf_set_common().
* Under lock because tmpport is static.
*/
atomic_set(&tmpport.status, -1); /* unknown */
tmpport.wwpn = port_name;
tmpport.d_id = port_id;
zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
ZFCP_ERP_ACTION_NONE);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
int clear_mask, char *id)
{
zfcp_erp_adapter_block(adapter, clear_mask);
zfcp_scsi_schedule_rports_block(adapter);
/* ensure propagation of failed status to new devices */
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
zfcp_erp_set_adapter_status(adapter,
ZFCP_STATUS_COMMON_ERP_FAILED);
return -EIO;
}
return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
adapter, NULL, NULL, id, 0);
}
@ -298,12 +383,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
zfcp_scsi_schedule_rports_block(adapter);
write_lock_irqsave(&adapter->erp_lock, flags);
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
zfcp_erp_set_adapter_status(adapter,
ZFCP_STATUS_COMMON_ERP_FAILED);
else
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
NULL, NULL, id, 0);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
NULL, NULL, id, 0);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
@ -344,9 +425,6 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
return;
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
port->adapter, port, NULL, id, 0);
}
@ -372,12 +450,6 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
/* ensure propagation of failed status to new devices */
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
return -EIO;
}
return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
port->adapter, port, NULL, id, 0);
}
@ -417,9 +489,6 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
zfcp_erp_lun_block(sdev, clear);
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
return;
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
zfcp_sdev->port, sdev, id, act_status);
}

View file

@ -52,10 +52,15 @@ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
struct zfcp_fsf_req *);
extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
unsigned int scsi_id, int ret);
/* zfcp_erp.c */
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_port_forced_no_port_dbf(char *id,
struct zfcp_adapter *adapter,
u64 port_name, u32 port_id);
extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);

View file

@ -180,6 +180,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
if (abrt_req)
break;
zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL);
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret) {
@ -276,6 +277,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
if (fsf_req)
break;
zfcp_dbf_scsi_devreset("wait", scpnt, tm_flags, NULL);
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret) {
@ -322,15 +324,16 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
int ret;
int ret = SUCCESS, fc_ret;
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret)
return ret;
fc_ret = fc_block_scsi_eh(scpnt);
if (fc_ret)
ret = fc_ret;
return SUCCESS;
zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
return ret;
}
struct scsi_transport_template *zfcp_scsi_transport_template;
@ -600,6 +603,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
if (port) {
zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
put_device(&port->dev);
} else {
zfcp_erp_port_forced_no_port_dbf(
"sctrpin", adapter,
rport->port_name /* zfcp_scsi_rport_register */,
rport->port_id /* zfcp_scsi_rport_register */);
}
}

View file

@ -3261,7 +3261,8 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
return;
if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
fcport->fp_speed > ha->link_data_rate)
fcport->fp_speed > ha->link_data_rate ||
!ha->flags.gpsc_supported)
return;
rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,

View file

@ -707,8 +707,14 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
for (i = 0; i < sgs; i++) {
if (vmalloced_buf) {
min = min_t(size_t,
len, desc_len - offset_in_page(buf));
/*
* Next scatterlist entry size is the minimum between
* the desc_len and the remaining buffer length that
* fits in a page.
*/
min = min_t(size_t, desc_len,
min_t(size_t, len,
PAGE_SIZE - offset_in_page(buf)));
vm_page = vmalloc_to_page(buf);
if (!vm_page) {
sg_free_table(sgt);

View file

@ -2419,13 +2419,12 @@ static void serial_console_write(struct console *co, const char *s,
unsigned long flags;
int locked = 1;
local_irq_save(flags);
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock(&port->lock);
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock(&port->lock);
spin_lock_irqsave(&port->lock, flags);
/* first save the SCSCR then disable the interrupts */
ctrl = serial_port_in(port, SCSCR);
@ -2442,8 +2441,7 @@ static void serial_console_write(struct console *co, const char *s,
serial_port_out(port, SCSCR, ctrl);
if (locked)
spin_unlock(&port->lock);
local_irq_restore(flags);
spin_unlock_irqrestore(&port->lock, flags);
}
static int serial_console_setup(struct console *co, char *options)

View file

@ -4457,7 +4457,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* reset. But only on the first attempt,
* lest we get into a time out/reset loop
*/
if (r == 0 || (r == -ETIMEDOUT && retries == 0))
if (r == 0 || (r == -ETIMEDOUT &&
retries == 0 &&
udev->speed > USB_SPEED_FULL))
break;
}
udev->descriptor.bMaxPacketSize0 =

View file

@ -2580,8 +2580,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
{
struct musb *musb = hcd_to_musb(hcd);
u8 devctl;
int ret;
musb_port_suspend(musb, true);
ret = musb_port_suspend(musb, true);
if (ret)
return ret;
if (!is_host_active(musb))
return 0;

View file

@ -92,7 +92,7 @@ extern void musb_host_rx(struct musb *, u8);
extern void musb_root_disconnect(struct musb *musb);
extern void musb_host_resume_root_hub(struct musb *musb);
extern void musb_host_poke_root_hub(struct musb *musb);
extern void musb_port_suspend(struct musb *musb, bool do_suspend);
extern int musb_port_suspend(struct musb *musb, bool do_suspend);
extern void musb_port_reset(struct musb *musb, bool do_reset);
extern void musb_host_finish_resume(struct work_struct *work);
#else
@ -124,7 +124,10 @@ static inline void musb_root_disconnect(struct musb *musb) {}
static inline void musb_host_resume_root_hub(struct musb *musb) {}
static inline void musb_host_poll_rh_status(struct musb *musb) {}
static inline void musb_host_poke_root_hub(struct musb *musb) {}
static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {}
static inline int musb_port_suspend(struct musb *musb, bool do_suspend)
{
return 0;
}
static inline void musb_port_reset(struct musb *musb, bool do_reset) {}
static inline void musb_host_finish_resume(struct work_struct *work) {}
#endif

View file

@ -74,14 +74,14 @@ void musb_host_finish_resume(struct work_struct *work)
spin_unlock_irqrestore(&musb->lock, flags);
}
void musb_port_suspend(struct musb *musb, bool do_suspend)
int musb_port_suspend(struct musb *musb, bool do_suspend)
{
struct usb_otg *otg = musb->xceiv->otg;
u8 power;
void __iomem *mbase = musb->mregs;
if (!is_host_active(musb))
return;
return 0;
/* NOTE: this doesn't necessarily put PHY into low power mode,
* turning off its clock; that's a function of PHY integration and
@ -92,16 +92,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
if (do_suspend) {
int retries = 10000;
power &= ~MUSB_POWER_RESUME;
power |= MUSB_POWER_SUSPENDM;
musb_writeb(mbase, MUSB_POWER, power);
if (power & MUSB_POWER_RESUME)
return -EBUSY;
/* Needed for OPT A tests */
power = musb_readb(mbase, MUSB_POWER);
while (power & MUSB_POWER_SUSPENDM) {
if (!(power & MUSB_POWER_SUSPENDM)) {
power |= MUSB_POWER_SUSPENDM;
musb_writeb(mbase, MUSB_POWER, power);
/* Needed for OPT A tests */
power = musb_readb(mbase, MUSB_POWER);
if (retries-- < 1)
break;
while (power & MUSB_POWER_SUSPENDM) {
power = musb_readb(mbase, MUSB_POWER);
if (retries-- < 1)
break;
}
}
dev_dbg(musb->controller, "Root port suspended, power %02x\n", power);
@ -138,6 +142,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
schedule_delayed_work(&musb->finish_resume_work,
msecs_to_jiffies(USB_RESUME_TIMEOUT));
}
return 0;
}
void musb_port_reset(struct musb *musb, bool do_reset)

View file

@ -262,10 +262,10 @@ static int as3711_bl_register(struct platform_device *pdev,
static int as3711_backlight_parse_dt(struct device *dev)
{
struct as3711_bl_pdata *pdata = dev_get_platdata(dev);
struct device_node *bl =
of_find_node_by_name(dev->parent->of_node, "backlight"), *fb;
struct device_node *bl, *fb;
int ret;
bl = of_get_child_by_name(dev->parent->of_node, "backlight");
if (!bl) {
dev_dbg(dev, "backlight node not found\n");
return -ENODEV;
@ -279,7 +279,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
if (pdata->su1_max_uA <= 0)
ret = -EINVAL;
if (ret < 0)
return ret;
goto err_put_bl;
}
fb = of_parse_phandle(bl, "su2-dev", 0);
@ -292,7 +292,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
if (pdata->su2_max_uA <= 0)
ret = -EINVAL;
if (ret < 0)
return ret;
goto err_put_bl;
if (of_find_property(bl, "su2-feedback-voltage", NULL)) {
pdata->su2_feedback = AS3711_SU2_VOLTAGE;
@ -314,8 +314,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
pdata->su2_feedback = AS3711_SU2_CURR_AUTO;
count++;
}
if (count != 1)
return -EINVAL;
if (count != 1) {
ret = -EINVAL;
goto err_put_bl;
}
count = 0;
if (of_find_property(bl, "su2-fbprot-lx-sd4", NULL)) {
@ -334,8 +336,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
pdata->su2_fbprot = AS3711_SU2_GPIO4;
count++;
}
if (count != 1)
return -EINVAL;
if (count != 1) {
ret = -EINVAL;
goto err_put_bl;
}
count = 0;
if (of_find_property(bl, "su2-auto-curr1", NULL)) {
@ -355,11 +359,20 @@ static int as3711_backlight_parse_dt(struct device *dev)
* At least one su2-auto-curr* must be specified iff
* AS3711_SU2_CURR_AUTO is used
*/
if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO))
return -EINVAL;
if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO)) {
ret = -EINVAL;
goto err_put_bl;
}
}
of_node_put(bl);
return 0;
err_put_bl:
of_node_put(bl);
return ret;
}
static int as3711_backlight_probe(struct platform_device *pdev)

View file

@ -116,7 +116,7 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
if (!pdata)
return;
np = of_find_node_by_name(nproot, "backlight");
np = of_get_child_by_name(nproot, "backlight");
if (!np) {
dev_err(&pdev->dev, "failed to find backlight node\n");
return;
@ -125,6 +125,8 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val))
pdata->dual_string = val;
of_node_put(np);
pdev->dev.platform_data = pdata;
}

View file

@ -184,11 +184,11 @@ static struct tps65217_bl_pdata *
tps65217_bl_parse_dt(struct platform_device *pdev)
{
struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
struct device_node *node = of_node_get(tps->dev->of_node);
struct device_node *node;
struct tps65217_bl_pdata *pdata, *err;
u32 val;
node = of_find_node_by_name(node, "backlight");
node = of_get_child_by_name(tps->dev->of_node, "backlight");
if (!node)
return ERR_PTR(-ENODEV);

View file

@ -1059,7 +1059,8 @@ static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
info->cmap.len || cmap->start < info->cmap.start)
return -EINVAL;
entries = kmalloc(sizeof(*entries) * cmap->len, GFP_KERNEL);
entries = kmalloc_array(cmap->len, sizeof(*entries),
GFP_KERNEL);
if (!entries)
return -ENOMEM;

View file

@ -113,6 +113,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
if (IS_ERR(mdev->clk))
return PTR_ERR(mdev->clk);
err = clk_prepare_enable(mdev->clk);
if (err)
return err;
clkrate = clk_get_rate(mdev->clk);
if (clkrate < 10000000)
dev_warn(&pdev->dev,
@ -126,12 +130,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdev->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mdev->regs))
return PTR_ERR(mdev->regs);
err = clk_prepare_enable(mdev->clk);
if (err)
return err;
if (IS_ERR(mdev->regs)) {
err = PTR_ERR(mdev->regs);
goto out_disable_clk;
}
/* Software reset 1-Wire module */
writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
@ -147,8 +149,12 @@ static int mxc_w1_probe(struct platform_device *pdev)
err = w1_add_master_device(&mdev->bus_master);
if (err)
clk_disable_unprepare(mdev->clk);
goto out_disable_clk;
return 0;
out_disable_clk:
clk_disable_unprepare(mdev->clk);
return err;
}

View file

@ -741,7 +741,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
/* slave modules need to be loaded in a context with unlocked mutex */
mutex_unlock(&dev->mutex);
request_module("w1-family-0x%02x", rn->family);
request_module("w1-family-0x%02X", rn->family);
mutex_lock(&dev->mutex);
spin_lock(&w1_flock);

View file

@ -637,8 +637,6 @@ static void __unbind_from_irq(unsigned int irq)
xen_irq_info_cleanup(info);
}
BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
xen_free_irq(irq);
}

View file

@ -369,8 +369,13 @@ static Node *create_entry(const char __user *buffer, size_t count)
s = strchr(p, del);
if (!s)
goto einval;
*s++ = '\0';
e->offset = simple_strtoul(p, &p, 10);
*s = '\0';
if (p != s) {
int r = kstrtoint(p, 10, &e->offset);
if (r != 0 || e->offset < 0)
goto einval;
}
p = s;
if (*p++)
goto einval;
pr_debug("register: offset: %#x\n", e->offset);
@ -410,7 +415,8 @@ static Node *create_entry(const char __user *buffer, size_t count)
if (e->mask &&
string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size)
goto einval;
if (e->size + e->offset > BINPRM_BUF_SIZE)
if (e->size > BINPRM_BUF_SIZE ||
BINPRM_BUF_SIZE - e->size < e->offset)
goto einval;
pr_debug("register: magic/mask length: %i\n", e->size);
if (USE_DEBUG) {

View file

@ -1202,6 +1202,8 @@ static noinline int csum_exist_in_range(struct btrfs_root *root,
list_del(&sums->list);
kfree(sums);
}
if (ret < 0)
return ret;
return 1;
}
@ -1351,10 +1353,23 @@ next_slot:
goto out_check;
if (btrfs_extent_readonly(root, disk_bytenr))
goto out_check;
if (btrfs_cross_ref_exist(trans, root, ino,
ret = btrfs_cross_ref_exist(trans, root, ino,
found_key.offset -
extent_offset, disk_bytenr))
extent_offset, disk_bytenr);
if (ret) {
/*
* ret could be -EIO if the above fails to read
* metadata.
*/
if (ret < 0) {
if (cow_start != (u64)-1)
cur_offset = cow_start;
goto error;
}
WARN_ON_ONCE(nolock);
goto out_check;
}
disk_bytenr += extent_offset;
disk_bytenr += cur_offset - found_key.offset;
num_bytes = min(end + 1, extent_end) - cur_offset;
@ -1372,8 +1387,20 @@ next_slot:
* this ensure that csum for a given extent are
* either valid or do not exist.
*/
if (csum_exist_in_range(root, disk_bytenr, num_bytes))
ret = csum_exist_in_range(root, disk_bytenr, num_bytes);
if (ret) {
/*
* ret could be -EIO if the above fails to read
* metadata.
*/
if (ret < 0) {
if (cow_start != (u64)-1)
cur_offset = cow_start;
goto error;
}
WARN_ON_ONCE(nolock);
goto out_check;
}
nocow = 1;
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
extent_end = found_key.offset +

View file

@ -3923,11 +3923,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
if (!(src_file.file->f_mode & FMODE_READ))
goto out_fput;
/* don't make the dst file partly checksummed */
if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
goto out_fput;
ret = -EISDIR;
if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
goto out_fput;
@ -3942,6 +3937,13 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
mutex_lock(&src->i_mutex);
}
/* don't make the dst file partly checksummed */
if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
ret = -EINVAL;
goto out_unlock;
}
/* determine range to clone */
ret = -EINVAL;
if (off + len > src->i_size || off + len < off)

View file

@ -2513,7 +2513,7 @@ static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
have_csum = scrub_find_csum(sctx, logical, csum);
if (have_csum == 0)
++sctx->stat.no_csum;
if (sctx->is_dev_replace && !have_csum) {
if (0 && sctx->is_dev_replace && !have_csum) {
ret = copy_nocow_pages(sctx, logical, l,
mirror_num,
physical_for_dev_replace);

View file

@ -3864,28 +3864,28 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
EXT4_BLOCK_SIZE_BITS(sb);
stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
/* If there are no blocks to remove, return now */
if (first_block >= stop_block)
goto out_stop;
/* If there are blocks to remove, do it */
if (stop_block > first_block) {
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
ret = ext4_es_remove_extent(inode, first_block,
stop_block - first_block);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_remove_space(inode, first_block,
stop_block - 1);
else
ret = ext4_ind_remove_space(handle, inode, first_block,
stop_block);
ret = ext4_es_remove_extent(inode, first_block,
stop_block - first_block);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_remove_space(inode, first_block,
stop_block - 1);
else
ret = ext4_ind_remove_space(handle, inode, first_block,
stop_block);
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);

View file

@ -1903,7 +1903,7 @@ retry:
return 0;
n_group = ext4_get_group_number(sb, n_blocks_count - 1);
if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
ext4_warning(sb, "resize would cause inodes_count overflow");
return -EINVAL;
}

View file

@ -211,10 +211,11 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
if (!dentry)
return NULL;
fc->ctl_dentry[fc->ctl_ndents++] = dentry;
inode = new_inode(fuse_control_sb);
if (!inode)
if (!inode) {
dput(dentry);
return NULL;
}
inode->i_ino = get_next_ino();
inode->i_mode = mode;
@ -228,6 +229,9 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
set_nlink(inode, nlink);
inode->i_private = fc;
d_add(dentry, inode);
fc->ctl_dentry[fc->ctl_ndents++] = dentry;
return dentry;
}
@ -284,7 +288,10 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
for (i = fc->ctl_ndents - 1; i >= 0; i--) {
struct dentry *dentry = fc->ctl_dentry[i];
d_inode(dentry)->i_private = NULL;
d_drop(dentry);
if (!i) {
/* Get rid of submounts: */
d_invalidate(dentry);
}
dput(dentry);
}
drop_nlink(d_inode(fuse_control_sb->s_root));

View file

@ -1657,8 +1657,19 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
return err;
if (attr->ia_valid & ATTR_OPEN) {
if (fc->atomic_o_trunc)
/* This is coming from open(..., ... | O_TRUNC); */
WARN_ON(!(attr->ia_valid & ATTR_SIZE));
WARN_ON(attr->ia_size != 0);
if (fc->atomic_o_trunc) {
/*
* No need to send request to userspace, since actual
* truncation has already been done by OPEN. But still
* need to truncate page cache.
*/
i_size_write(inode, 0);
truncate_pagecache(inode, 0);
return 0;
}
file = NULL;
}

View file

@ -1166,6 +1166,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
err_put_conn:
fuse_bdi_destroy(fc);
fuse_conn_put(fc);
sb->s_fs_info = NULL;
err_fput:
fput(file);
err:

View file

@ -343,7 +343,7 @@ static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf,
int id_len;
ssize_t ret;
id_len = snprintf(id_str, sizeof(id_str), "%u", id);
id_len = nfs_map_numeric_to_string(id, id_str, sizeof(id_str));
ret = nfs_idmap_get_key(id_str, id_len, type, buf, buflen, idmap);
if (ret < 0)
return -EINVAL;
@ -626,7 +626,8 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
if (strcmp(upcall->im_name, im->im_name) != 0)
break;
/* Note: here we store the NUL terminator too */
len = sprintf(id_str, "%d", im->im_id) + 1;
len = 1 + nfs_map_numeric_to_string(im->im_id, id_str,
sizeof(id_str));
ret = nfs_idmap_instantiate(key, authkey, id_str, len);
break;
case IDMAP_CONV_IDTONAME:

View file

@ -3595,7 +3595,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
nfserr = nfserr_resource;
goto err_no_verf;
}
maxcount = min_t(u32, readdir->rd_maxcount, INT_MAX);
maxcount = svc_max_payload(resp->rqstp);
maxcount = min_t(u32, readdir->rd_maxcount, maxcount);
/*
* Note the rfc defines rd_maxcount as the size of the
* READDIR4resok structure, which includes the verifier above
@ -3609,7 +3610,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
/* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
if (!readdir->rd_dircount)
readdir->rd_dircount = INT_MAX;
readdir->rd_dircount = svc_max_payload(resp->rqstp);
readdir->xdr = xdr;
readdir->rd_maxcount = maxcount;

View file

@ -270,6 +270,7 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
struct dentry *lower_dentry;
struct vfsmount *lower_mnt;
struct dentry *lower_parent_dentry = NULL;
struct dentry *parent_dentry = NULL;
struct path lower_path;
struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
const struct cred *saved_cred = NULL;
@ -289,11 +290,14 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
/* check disk space */
if (!check_min_free_space(dentry, 0, 1)) {
parent_dentry = dget_parent(dentry);
if (!check_min_free_space(parent_dentry, 0, 1)) {
pr_err("sdcardfs: No minimum free space.\n");
err = -ENOSPC;
dput(parent_dentry);
goto out_revert;
}
dput(parent_dentry);
/* the lower_dentry is negative here */
sdcardfs_get_lower_path(dentry, &lower_path);


@@ -1107,7 +1107,7 @@ static int recomp_data_node(const struct ubifs_info *c,
int err, len, compr_type, out_len;
out_len = le32_to_cpu(dn->size);
buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
if (!buf)
return -ENOMEM;
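kmalloc_array() allocates the same buffer but refuses multiplications that overflow, so a corrupted on-disk length can no longer wrap into an undersized allocation. A userspace sketch of the check it adds (alloc_array is only illustrative, not the kernel helper):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for kmalloc_array(): refuse n * size if it wraps. */
static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;            /* would overflow -> short buffer */
	return malloc(n * size);
}

int main(void)
{
	/* A bogus length close to SIZE_MAX would wrap when multiplied. */
	void *p = alloc_array(SIZE_MAX / 2, 4);

	printf("allocation %s\n", p ? "succeeded" : "rejected (overflow)");
	free(p);
	return 0;
}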


@@ -150,6 +150,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
sizeof(struct fileIdentDesc));
}
}
/* Got last entry outside of dir size - fs is corrupted! */
if (*nf_pos > dir->i_size)
return NULL;
return fi;
}


@@ -888,8 +888,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
if (!q->limits.chunk_sectors)
return q->limits.max_sectors;
return q->limits.chunk_sectors -
(offset & (q->limits.chunk_sectors - 1));
return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
(offset & (q->limits.chunk_sectors - 1))));
}
static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
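The old expression returned whatever was left in the current chunk, which may exceed max_sectors; the new one honours both limits. A worked example with made-up queue limits:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int max_sectors   = 256;   /* queue limit */
	unsigned int chunk_sectors = 1024;  /* power of two, as required */
	unsigned long long offset  = 0;     /* request start sector */

	unsigned int left_in_chunk =
		chunk_sectors - (unsigned int)(offset & (chunk_sectors - 1));

	printf("old result: %u sectors\n", left_in_chunk);                   /* 1024 */
	printf("new result: %u sectors\n", MIN(max_sectors, left_in_chunk)); /* 256 */
	return 0;
}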


@@ -111,7 +111,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
#define __branch_check__(x, expect) ({ \
int ______r; \
long ______r; \
static struct ftrace_branch_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_annotated_branch"))) \


@@ -49,7 +49,7 @@ struct iio_buffer_access_funcs {
int (*request_update)(struct iio_buffer *buffer);
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
int (*set_length)(struct iio_buffer *buffer, int length);
int (*set_length)(struct iio_buffer *buffer, unsigned int length);
void (*release)(struct iio_buffer *buffer);
@@ -78,8 +78,8 @@ struct iio_buffer_access_funcs {
* @watermark: [INTERN] number of datums to wait for poll/read.
*/
struct iio_buffer {
int length;
int bytes_per_datum;
unsigned int length;
size_t bytes_per_datum;
struct attribute_group *scan_el_attrs;
long *scan_mask;
bool scan_timestamp;


@@ -878,7 +878,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u16 conn_timeout, u8 role);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
u8 role);
u8 role, bdaddr_t *direct_rpa);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,


@@ -28,6 +28,7 @@
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
@@ -258,9 +259,10 @@ unsigned int jiffies_to_msecs(const unsigned long j)
return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
HZ_TO_MSEC_SHR32;
# else
return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
# endif
#endif
}
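Both branches now round up instead of truncating, so a non-zero jiffy count is never reported as 0 ms. A worked example assuming HZ=1024, where the precomputed multiplier/divisor reduce to 125/128:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* HZ = 1024: 1000/1024 reduces to 125/128 (the kernel precomputes these). */
	unsigned long num = 125, den = 128;
	unsigned long j = 1;                 /* one jiffy, roughly 0.98 ms */

	printf("truncating:  %lu ms\n", (j * num) / den);             /* 0 */
	printf("rounding up: %lu ms\n", DIV_ROUND_UP(j * num, den));  /* 1 */
	return 0;
}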


@@ -1345,9 +1345,6 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
return string(buf, end, NULL, spec);
switch (fmt[1]) {
case 'r':
return number(buf, end, clk_get_rate(clk), spec);
case 'n':
default:
#ifdef CONFIG_COMMON_CLK


@@ -708,7 +708,8 @@ done:
}
static void hci_req_add_le_create_conn(struct hci_request *req,
struct hci_conn *conn)
struct hci_conn *conn,
bdaddr_t *direct_rpa)
{
struct hci_cp_le_create_conn cp;
struct hci_dev *hdev = conn->hdev;
@@ -716,11 +717,23 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
memset(&cp, 0, sizeof(cp));
/* Update random address, but set require_privacy to false so
* that we never connect with a non-resolvable address.
/* If direct address was provided we use it instead of current
* address.
*/
if (hci_update_random_address(req, false, &own_addr_type))
return;
if (direct_rpa) {
if (bacmp(&req->hdev->random_addr, direct_rpa))
hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
direct_rpa);
/* direct address is always RPA */
own_addr_type = ADDR_LE_DEV_RANDOM;
} else {
/* Update random address, but set require_privacy to false so
* that we never connect with a non-resolvable address.
*/
if (hci_update_random_address(req, false, &own_addr_type))
return;
}
/* Set window to be the same value as the interval to enable
* continuous scanning.
@@ -782,7 +795,7 @@ static void hci_req_directed_advertising(struct hci_request *req,
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
u8 role)
u8 role, bdaddr_t *direct_rpa)
{
struct hci_conn_params *params;
struct hci_conn *conn, *conn_unfinished;
@@ -913,7 +926,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
}
hci_req_add_le_create_conn(&req, conn);
hci_req_add_le_create_conn(&req, conn, direct_rpa);
create_conn:
err = hci_req_run(&req, create_le_conn_complete);


@@ -4632,7 +4632,8 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
/* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
bdaddr_t *addr,
u8 addr_type, u8 adv_type)
u8 addr_type, u8 adv_type,
bdaddr_t *direct_rpa)
{
struct hci_conn *conn;
struct hci_conn_params *params;
@@ -4683,7 +4684,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
}
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
direct_rpa);
if (!IS_ERR(conn)) {
/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
* by higher layer that tried to connect, if no then
@@ -4780,8 +4782,13 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
bdaddr_type = irk->addr_type;
}
/* Check if we have been requested to connect to this device */
conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
/* Check if we have been requested to connect to this device.
*
* direct_addr is set only for directed advertising reports (it is NULL
* for advertising reports) and is already verified to be RPA above.
*/
conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
direct_addr);
if (conn && type == LE_ADV_IND) {
/* Store report for later inclusion by
* mgmt_device_connected


@@ -1912,7 +1912,8 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
int off, pad = 0;
unsigned int size_kern, match_size = mwt->match_size;
strlcpy(name, mwt->u.name, sizeof(name));
if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
return -EINVAL;
if (state->buf_kern_start)
dst = state->buf_kern_start + state->buf_kern_offset;
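Unlike strlcpy(), strscpy() signals truncation with a negative return, so an over-long extension name is rejected instead of being silently cut down to a name that might match a different module. A userspace model of that contract (my_strscpy is an illustration, not the kernel implementation):

#include <stdio.h>
#include <string.h>

/* Illustrative model of strscpy(): copy with NUL-termination,
 * return -1 on truncation instead of a silently-truncated result. */
static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size == 0)
		return -1;
	if (len >= size) {
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -1;               /* caller can reject the input */
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char name[8];

	if (my_strscpy(name, "a_far_too_long_match_name", sizeof(name)) < 0)
		printf("rejected over-long name\n");
	else
		printf("accepted: %s\n", name);
	return 0;
}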


@@ -613,7 +613,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
sk->sk_rcvbuf = rcvbuf;
/* Make the window clamp follow along. */
tp->window_clamp = rcvwin;
tp->window_clamp = tcp_win_from_space(rcvbuf);
}
}
tp->rcvq_space.space = copied;
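Deriving the clamp from tcp_win_from_space(rcvbuf) keeps the advertised window within the share of the receive buffer that can actually hold payload, rather than using the raw rcvwin estimate. A simplified sketch of that scaling, assuming a positive tcp_adv_win_scale (the helper body and the scale value of 1 are assumptions, not the exact 4.4 code):

#include <stdio.h>

/* Simplified model of tcp_win_from_space() for a positive adv_win_scale:
 * reserve 1/2^scale of the buffer for skb and bookkeeping overhead. */
static int win_from_space(int space, int adv_win_scale)
{
	return space - (space >> adv_win_scale);
}

int main(void)
{
	int rcvbuf = 4 << 20;   /* receive buffer actually allocated */
	int rcvwin = 6 << 20;   /* estimate that overshoots the buffer */

	printf("old clamp: %d\n", rcvwin);                    /* larger than rcvbuf */
	printf("new clamp: %d\n", win_from_space(rcvbuf, 1)); /* half of rcvbuf */
	return 0;
}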


@@ -1638,6 +1638,10 @@ process:
reqsk_put(req);
goto discard_it;
}
if (tcp_checksum_complete(skb)) {
reqsk_put(req);
goto csum_error;
}
if (unlikely(sk->sk_state != TCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;


@@ -1417,6 +1417,10 @@ process:
reqsk_put(req);
goto discard_it;
}
if (tcp_checksum_complete(skb)) {
reqsk_put(req);
goto csum_error;
}
if (unlikely(sk->sk_state != TCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;


@@ -124,7 +124,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
struct flowi6 *fl6 = &fl->u.ip6;
int onlyproto = 0;
const struct ipv6hdr *hdr = ipv6_hdr(skb);
u16 offset = sizeof(*hdr);
u32 offset = sizeof(*hdr);
struct ipv6_opt_hdr *exthdr;
const unsigned char *nh = skb_network_header(skb);
u16 nhoff = IP6CB(skb)->nhoff;
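offset accumulates extension-header lengths while walking the chain; as a u16 it wraps past 65535, so a packet carrying enough header bytes could make later lookups use a bogus offset. A tiny demonstration of the wraparound the wider type avoids:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t off16 = 65530;   /* running offset stored in 16 bits */
	uint32_t off32 = 65530;   /* same value with the widened type */

	off16 += 24;              /* next extension header length */
	off32 += 24;

	printf("u16 offset: %u (wrapped)\n", off16);   /* 18 */
	printf("u32 offset: %u\n", off32);             /* 65554 */
	return 0;
}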


@@ -2349,8 +2349,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
struct ipvs_sync_daemon_cfg cfg;
memset(&cfg, 0, sizeof(cfg));
strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
sizeof(cfg.mcast_ifn));
ret = -EINVAL;
if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
sizeof(cfg.mcast_ifn)) <= 0)
goto out_dec;
cfg.syncid = dm->syncid;
ret = start_sync_thread(ipvs, &cfg, dm->state);
} else {
@@ -2388,12 +2390,19 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
}
}
if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) &&
strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) ==
IP_VS_SCHEDNAME_MAXLEN) {
ret = -EINVAL;
goto out_unlock;
}
/* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
usvc.protocol != IPPROTO_SCTP) {
pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
pr_err("set_ctl: invalid protocol: %d %pI4:%d\n",
usvc.protocol, &usvc.addr.ip,
ntohs(usvc.port), usvc.sched_name);
ntohs(usvc.port));
ret = -EFAULT;
goto out_unlock;
}
@@ -2822,7 +2831,7 @@ static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
[IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 },
[IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
.len = IP_VS_IFNAME_MAXLEN },
.len = IP_VS_IFNAME_MAXLEN - 1 },
[IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
[IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 },
[IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 },
@@ -2840,7 +2849,7 @@ static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
[IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 },
[IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 },
[IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING,
.len = IP_VS_SCHEDNAME_MAXLEN },
.len = IP_VS_SCHEDNAME_MAXLEN - 1 },
[IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING,
.len = IP_VS_PENAME_MAXLEN },
[IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,


@@ -626,6 +626,11 @@ static void xfrm_hash_rebuild(struct work_struct *work)
/* re-insert all policies by order of creation */
list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
if (policy->walk.dead ||
xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
/* skip socket policies */
continue;
}
newpos = NULL;
chain = policy_hash_bysel(net, &policy->selector,
policy->family,


@@ -547,8 +547,10 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
return err;
strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
if (apcm == NULL)
if (apcm == NULL) {
snd_device_free(chip->card, pcm);
return -ENOMEM;
}
apcm->chip = chip;
apcm->pcm = pcm;
apcm->codec = codec;
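If the apcm allocation fails after the PCM device has already been created, that device must be released before returning or it is leaked. The same allocate-then-undo-on-failure shape in a self-contained sketch (all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct device  { int id; };
struct wrapper { struct device *dev; };

static struct wrapper *attach(void)
{
	struct device *dev = malloc(sizeof(*dev));    /* first resource */
	if (!dev)
		return NULL;

	struct wrapper *w = calloc(1, sizeof(*w));    /* second resource */
	if (!w) {
		free(dev);   /* the fix: release what was already created */
		return NULL;
	}

	w->dev = dev;
	return w;
}

int main(void)
{
	struct wrapper *w = attach();

	printf("attach %s\n", w ? "succeeded" : "failed without leaking");
	if (w) {
		free(w->dev);
		free(w);
	}
	return 0;
}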


@@ -851,6 +851,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),

Some files were not shown because too many files have changed in this diff.