This is the 4.4.147 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAltsFTEACgkQONu9yGCS
aT7VPg/+KkII11i9uplgHn1nTKf1NTePO+Ur/LWJ8w1JR+2aRp2nresV3chI6gN/
zgraNSzRMrcfno7ERl5Ryrd9YKlsR1JTFBjW6Q9xHfiVu5jKxm7tDU1Quzknmwdy
qXReqtQCtmttOcJqRGVIgDWEy6XxUB2eOLU++nZNCrTw90M+hiTC0COVnY/qaoUd
+pYjjdMdG/qIB345gua+o+4q/yuV/cpfSwKf3ycEQZistzS8wvKwV1Szm4DXp1v/
mGIOx/a5NBRUKlHSdD46QBR9TvugeS4kb5m5vBh6LLum0TWl+Gh0PCg3Q2pBHGWp
iofDHcZga3LnX5rckXVwI69MPoCG3gXei5F8soYcdiGf0XOK2nZN/HSNUB2rBdhw
G8n/Ojr4owedpc8X8Vle19/iQGu2RDh8UfeMRAeUujG2DaWF+YCTy69IY3aNI2Vo
YCNUApib56YnG7/Y/SPLua7kEYIK2z99q8Vc1dW98nqqDXmLPzH78dHmVvLz0WmL
vQfKkPKGM6Ae4YTLM+2Le2BtyQu42FC5fRm1ewPIATo/6Dxdq/+5+O+G2bAg2qD6
kySslEtyKQ/B1IthALmD5ZDO5Q4B2GhewUtwlbo0LbfVB97otdOOlvLyCjNYdRbz
HlCU+BPuh7SDkaJ9spz9P6j8OcDk+/vhgtAd3g16kIXAWecCvf4=
=0wLN
-----END PGP SIGNATURE-----

Merge 4.4.147 into android-4.4

Changes in 4.4.147
	scsi: qla2xxx: Fix ISP recovery on unload
	scsi: qla2xxx: Return error when TMF returns
	genirq: Make force irq threading setup more robust
	nohz: Fix local_timer_softirq_pending()
	netlink: Do not subscribe to non-existent groups
	netlink: Don't shift with UB on nlk->ngroups
	netlink: Don't shift on 64 for ngroups
	ext4: fix false negatives *and* false positives in ext4_check_descriptors()
	ACPI / PCI: Bail early in acpi_pci_add_bus() if there is no ACPI handle
	ring_buffer: tracing: Inherit the tracing setting to next ring buffer
	i2c: imx: Fix reinit_completion() use
	jfs: Fix inconsistency between memory allocation and ea_buf->max_size
	Linux 4.4.147

Change-Id: I067f9844278976dddef8063961a70e189c423de3
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit a5fc66599b

13 changed files with 54 additions and 18 deletions
Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 146
+SUBLEVEL = 147
 EXTRAVERSION =
 NAME = Blurry Fish Butt

drivers/i2c/busses/i2c-imx.c
@@ -382,6 +382,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
 		goto err_desc;
 	}

+	reinit_completion(&dma->cmd_complete);
 	txdesc->callback = i2c_imx_dma_callback;
 	txdesc->callback_param = i2c_imx;
 	if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -631,7 +632,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
 	 * The first byte must be transmitted by the CPU.
 	 */
 	imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
@@ -690,7 +690,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
 	if (result)
 		return result;

-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 			&i2c_imx->dma->cmd_complete,
 			msecs_to_jiffies(DMA_TIMEOUT));

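The point of this fix: once dmaengine_submit() has queued the descriptor, the DMA callback may run at any moment, so re-initialising the completion afterwards (as the old write/read paths did) can wipe out a signal that has already arrived and leave the waiter to time out. A minimal stand-alone analogy using POSIX threads rather than the kernel completion API — the struct and helpers below are invented for the demo:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the kernel's struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void reinit_completion(struct completion *c)
{
	c->done = false;
}

/* Stand-in for i2c_imx_dma_callback: may run as soon as the transfer
 * has been submitted. */
static void *dma_callback(void *arg)
{
	complete(arg);
	return NULL;
}

int main(void)
{
	struct completion cmd_complete = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
	};
	pthread_t cb;

	/* Fixed ordering: re-initialise BEFORE submitting; doing it after
	 * pthread_create() could erase a completion that already fired. */
	reinit_completion(&cmd_complete);
	pthread_create(&cb, NULL, dma_callback, &cmd_complete); /* "dmaengine_submit" */

	pthread_mutex_lock(&cmd_complete.lock);
	while (!cmd_complete.done)	/* wait_for_completion analogue */
		pthread_cond_wait(&cmd_complete.cond, &cmd_complete.lock);
	pthread_mutex_unlock(&cmd_complete.lock);

	pthread_join(cb, NULL);
	puts("transfer completed");
	return 0;
}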
drivers/pci/pci-acpi.c
@@ -543,7 +543,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
 	union acpi_object *obj;
 	struct pci_host_bridge *bridge;

-	if (acpi_pci_disabled || !bus->bridge)
+	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
 		return;

 	acpi_pci_slot_enumerate(bus);

drivers/scsi/qla2xxx/qla_init.c
@@ -325,11 +325,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,

 	wait_for_completion(&tm_iocb->u.tmf.comp);

-	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
-	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
+	rval = tm_iocb->u.tmf.data;

-	if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
-		ql_dbg(ql_dbg_taskm, vha, 0x8030,
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x8030,
 		    "TM IOCB failed (%x).\n", rval);
 	}

drivers/scsi/qla2xxx/qla_os.c
@@ -4938,8 +4938,9 @@ qla2x00_do_dpc(void *data)
 			}
 		}

-		if (test_and_clear_bit(ISP_ABORT_NEEDED,
-						&base_vha->dpc_flags)) {
+		if (test_and_clear_bit
+		    (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+		    !test_bit(UNLOADING, &base_vha->dpc_flags)) {

 			ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
 			    "ISP abort scheduled.\n");

fs/ext4/super.c
@@ -2101,7 +2101,7 @@ static int ext4_check_descriptors(struct super_block *sb,
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
 	ext4_fsblk_t last_block;
-	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
+	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
 	ext4_fsblk_t block_bitmap;
 	ext4_fsblk_t inode_bitmap;
 	ext4_fsblk_t inode_table;
@@ -3776,13 +3776,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed_mount2;
 		}
 	}
+	sbi->s_gdb_count = db_count;
 	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
 		ret = -EFSCORRUPTED;
 		goto failed_mount2;
 	}

-	sbi->s_gdb_count = db_count;
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 	spin_lock_init(&sbi->s_next_gen_lock);

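Two things happen here: the group-descriptor blocks occupy [sb_block + 1, sb_block + ext4_bg_num_gdb(sb, 0)], so the old "+ 1" bound was one block too long and flagged the first block *after* the descriptor area as overlapping (the false positives); and s_gdb_count is now set before the check so validation runs with the correct descriptor count (the false negatives). A hypothetical stand-alone demo of the off-by-one, using an overlap test of the shape used in ext4_check_descriptors() and made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long sb_block = 0, num_gdb = 5;
	/* descriptor blocks occupy [sb_block + 1, sb_block + num_gdb] */
	unsigned long long old_last_bg = sb_block + num_gdb + 1;  /* off by one */
	unsigned long long new_last_bg = sb_block + num_gdb;
	unsigned long long block_bitmap = sb_block + num_gdb + 1; /* first block past the descriptors */

	printf("old bound flags a valid bitmap: %d\n",
	       block_bitmap >= sb_block + 1 && block_bitmap <= old_last_bg); /* prints 1 */
	printf("new bound flags it:             %d\n",
	       block_bitmap >= sb_block + 1 && block_bitmap <= new_last_bg); /* prints 0 */
	return 0;
}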
fs/jfs/xattr.c
@@ -493,15 +493,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
 	if (size > PSIZE) {
 		/*
 		 * To keep the rest of the code simple.  Allocate a
-		 * contiguous buffer to work with
+		 * contiguous buffer to work with. Make the buffer large
+		 * enough to make use of the whole extent.
 		 */
-		ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+		ea_buf->max_size = (size + sb->s_blocksize - 1) &
+		    ~(sb->s_blocksize - 1);
+
+		ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
 		if (ea_buf->xattr == NULL)
 			return -ENOMEM;

 		ea_buf->flag = EA_MALLOC;
-		ea_buf->max_size = (size + sb->s_blocksize - 1) &
-		    ~(sb->s_blocksize - 1);

 		if (ea_size == 0)
 			return 0;

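The inconsistency being fixed: max_size was rounded up to a multiple of the block size, but the buffer itself was kmalloc'ed with the unrounded size, so later code trusting max_size could write past the allocation. The fix computes the rounded size first and allocates exactly that much. A stand-alone userspace illustration of the round-up mask, with made-up numbers (the mask only works because the block size is a power of two):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t blocksize = 4096;	/* sb->s_blocksize is a power of two */
	size_t size = 5000;
	/* round size up to the next multiple of blocksize */
	size_t max_size = (size + blocksize - 1) & ~(blocksize - 1);
	char *xattr = malloc(max_size);	/* the old code allocated only `size` bytes */

	printf("size=%zu max_size=%zu\n", size, max_size);	/* 5000 -> 8192 */
	free(xattr);
	return 0;
}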
include/linux/ring_buffer.h
@@ -162,6 +162,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
 void ring_buffer_record_off(struct ring_buffer *buffer);
 void ring_buffer_record_on(struct ring_buffer *buffer);
 int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);

kernel/irq/manage.c
@@ -1012,6 +1012,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
 		return 0;

+	/*
+	 * No further action required for interrupts which are requested as
+	 * threaded interrupts already
+	 */
+	if (new->handler == irq_default_primary_handler)
+		return 0;
+
 	new->flags |= IRQF_ONESHOT;

 	/*
@@ -1019,7 +1026,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
 	 * thread handler. We force thread them as well by creating a
 	 * secondary action.
 	 */
-	if (new->handler != irq_default_primary_handler && new->thread_fn) {
+	if (new->handler && new->thread_fn) {
 		/* Allocate the secondary action */
 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
 		if (!new->secondary)

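The restructuring bails out early for requests that were already threaded by the caller (a NULL primary handler makes the core install irq_default_primary_handler), so forced threading no longer tacks IRQF_ONESHOT onto them or creates a secondary action. A simplified stand-alone miniature of the decision flow; the types and the printf stand-in are invented for the demo and only the predicates mirror the kernel code:

#include <stdio.h>
#include <stddef.h>

typedef int (*irq_handler_t)(void);
static int irq_default_primary_handler(void) { return 0; }	/* stand-in */

struct irqaction {
	irq_handler_t handler;
	irq_handler_t thread_fn;
	unsigned int flags;
};

#define IRQF_ONESHOT 0x2000	/* same value as include/linux/interrupt.h */

static int setup_forced_threading(struct irqaction *new)
{
	/* Already requested as a threaded interrupt (caller passed a NULL
	 * primary handler): leave it alone instead of forcing IRQF_ONESHOT. */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/* Both a primary and a thread handler: the real code allocates a
	 * secondary irqaction here. */
	if (new->handler && new->thread_fn)
		printf("would allocate secondary action\n");
	return 0;
}

int main(void)
{
	struct irqaction threaded = { irq_default_primary_handler, NULL, 0 };

	setup_forced_threading(&threaded);
	printf("flags=%#x (IRQF_ONESHOT not forced)\n", threaded.flags);
	return 0;
}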
kernel/time/tick-sched.c
@@ -570,7 +570,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)

 static inline bool local_timer_softirq_pending(void)
 {
-	return local_softirq_pending() & TIMER_SOFTIRQ;
+	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
 }

 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,

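TIMER_SOFTIRQ is an enum *index* (value 1), not a bit mask, so the old expression masked the pending word with 1 and actually examined bit 0, which belongs to HI_SOFTIRQ. A stand-alone check of both expressions (the enum indices are copied from the kernel's softirq list):

#include <stdio.h>

#define BIT(nr) (1U << (nr))
enum { HI_SOFTIRQ = 0, TIMER_SOFTIRQ };	/* indices, as in interrupt.h */

int main(void)
{
	unsigned int pending = BIT(TIMER_SOFTIRQ);	/* only the timer softirq raised */

	/* old test: masks with the index 1, i.e. bit 0 (HI_SOFTIRQ) -> 0, a false negative */
	printf("buggy: %u\n", pending & TIMER_SOFTIRQ);
	/* fixed test: masks with bit 1 -> nonzero, the timer softirq is seen */
	printf("fixed: %u\n", pending & BIT(TIMER_SOFTIRQ));
	return 0;
}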
kernel/trace/ring_buffer.c
@@ -3141,6 +3141,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
 	return !atomic_read(&buffer->record_disabled);
 }

+/**
+ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
+ * @buffer: The ring buffer to see if write is set enabled
+ *
+ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
+ * Note that this does NOT mean it is in a writable state.
+ *
+ * It may return true when the ring buffer has been disabled by
+ * ring_buffer_record_disable(), as that is a temporary disabling of
+ * the ring buffer.
+ */
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+{
+	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
+}
+
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.

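record_disabled doubles as a counter (temporary disables via ring_buffer_record_disable()) and a flag word (the sticky RB_BUFFER_OFF bit set by ring_buffer_record_off()), which is why the new helper masks with RB_BUFFER_OFF rather than testing the whole value. A stand-alone sketch of the two predicates; RB_BUFFER_OFF is assumed here to be bit 20, matching the definition in ring_buffer.c:

#include <stdio.h>

#define RB_BUFFER_OFF (1U << 20)	/* assumed: the flag bit used in ring_buffer.c */

static int is_on(unsigned int record_disabled)
{
	return !record_disabled;			/* any disable makes it "off" */
}

static int is_set_on(unsigned int record_disabled)
{
	return !(record_disabled & RB_BUFFER_OFF);	/* only record_off() counts */
}

int main(void)
{
	unsigned int rd = 1;		/* temporarily disabled: counter incremented */
	printf("disabled: is_on=%d is_set_on=%d\n", is_on(rd), is_set_on(rd));	/* 0 1 */

	rd = RB_BUFFER_OFF;		/* switched off via record_off() */
	printf("off:      is_on=%d is_set_on=%d\n", is_on(rd), is_set_on(rd));	/* 0 0 */
	return 0;
}

update_max_tr() in the next hunk relies on exactly this distinction to carry the recordable setting over to the swapped-in max_buffer.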
kernel/trace/trace.c
@@ -1089,6 +1089,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)

 	arch_spin_lock(&tr->max_lock);

+	/* Inherit the recordable setting from trace_buffer */
+	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+		ring_buffer_record_on(tr->max_buffer.buffer);
+	else
+		ring_buffer_record_off(tr->max_buffer.buffer);
+
 	buf = tr->trace_buffer.buffer;
 	tr->trace_buffer.buffer = tr->max_buffer.buffer;
 	tr->max_buffer.buffer = buf;

net/netlink/af_netlink.c
@@ -986,6 +986,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 			return err;
 	}

+	if (nlk->ngroups == 0)
+		groups = 0;
+	else if (nlk->ngroups < 8*sizeof(groups))
+		groups &= (1UL << nlk->ngroups) - 1;
+
 	bound = nlk->bound;
 	if (bound) {
 		/* Ensure nlk->portid is up-to-date. */
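The three-way guard matters because groups is a long: when ngroups equals the word width (64 on 64-bit), `1UL << ngroups` is undefined behaviour in C, and when ngroups is 0 no subscription bit is valid at all. A stand-alone sketch of the masking rule; the wrapper function name is invented for the demo:

#include <stdio.h>
#include <stdint.h>

static uint64_t mask_groups(uint64_t groups, unsigned int ngroups)
{
	if (ngroups == 0)
		groups = 0;				/* no groups exist: drop everything */
	else if (ngroups < 8 * sizeof(groups))
		groups &= (1ULL << ngroups) - 1;	/* safe: shift count < 64 */
	/* ngroups == 64: every bit is a real group, no mask needed
	 * (and 1ULL << 64 would be undefined behaviour). */
	return groups;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)mask_groups(~0ULL, 32));	/* 0xffffffff */
	printf("%#llx\n", (unsigned long long)mask_groups(~0ULL, 64));	/* unchanged */
	return 0;
}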