Merge android-4.4.98 (3d4af8) into msm-4.4

* refs/heads/tmp-03d4af8
  Linux 4.4.98
  PKCS#7: fix unitialized boolean 'want'
  x86/oprofile/ppro: Do not use __this_cpu*() in preemptible context
  can: c_can: don't indicate triple sampling support for D_CAN
  can: sun4i: handle overrun in RX FIFO
  rbd: use GFP_NOIO for parent stat and data requests
  drm/vmwgfx: Fix Ubuntu 17.10 Wayland black screen issue
  Input: elan_i2c - add ELAN060C to the ACPI table
  MIPS: AR7: Ensure that serial ports are properly set up
  MIPS: AR7: Defer registration of GPIO
  tools: firmware: check for distro fallback udev cancel rule
  selftests: firmware: send expected errors to /dev/null
  selftests: firmware: add empty string and async tests
  test: firmware_class: report errors properly on failure
  MIPS: SMP: Fix deadlock & online race
  MIPS: Fix race on setting and getting cpu_online_mask
  MIPS: SMP: Use a completion event to signal CPU up
  MIPS: Fix CM region target definitions
  MIPS: microMIPS: Fix incorrect mask in insn_table_MM
  ALSA: seq: Avoid invalid lockdep class warning
  ALSA: seq: Fix OSS sysex delivery in OSS emulation
  ARM: 8720/1: ensure dump_instr() checks addr_limit
  KEYS: fix NULL pointer dereference during ASN.1 parsing [ver #2]
  crypto: x86/sha1-mb - fix panic due to unaligned access
  workqueue: Fix NULL pointer dereference
  x86/uaccess, sched/preempt: Verify access_ok() context
  platform/x86: hp-wmi: Do not shadow error values
  platform/x86: hp-wmi: Fix error value for hp_wmi_tablet_state
  KEYS: trusted: fix writing past end of buffer in trusted_read()
  KEYS: trusted: sanitize all key material
  cdc_ncm: Set NTB format again after altsetting switch for Huawei devices
  platform/x86: hp-wmi: Fix detection for dock and tablet mode
  net: dsa: select NET_SWITCHDEV
  s390/qeth: issue STARTLAN as first IPA command
  IB/ipoib: Change list_del to list_del_init in the tx object
  Input: mpr121 - set missing event capability
  Input: mpr121 - handle multiple bits change of status register
  IPsec: do not ignore crypto err in ah4 input
  netfilter: nft_meta: deal with PACKET_LOOPBACK in netdev family
  usb: hcd: initialize hcd->flags to 0 when rm hcd
  serial: sh-sci: Fix register offsets for the IRDA serial port
  phy: increase size of MII_BUS_ID_SIZE and bus_id
  iio: trigger: free trigger resource correctly
  crypto: vmx - disable preemption to enable vsx in aes_ctr.c
  ARM: omap2plus_defconfig: Fix probe errors on UARTs 5 and 6
  powerpc/corenet: explicitly disable the SDHC controller on kmcoge4
  iommu/arm-smmu-v3: Clear prior settings when updating STEs
  KVM: PPC: Book 3S: XICS: correct the real mode ICP rejecting counter
  drm: drm_minor_register(): Clean up debugfs on failure
  xen/netback: set default upper limit of tx/rx queues to 8
  PCI: mvebu: Handle changes to the bridge windows while enabled
  video: fbdev: pmag-ba-fb: Remove bad `__init' annotation
  adv7604: Initialize drive strength to default when using DT
  ANDROID: binder: clarify deferred thread work.
  FROMLIST: arm64: Avoid aligning normal memory pointers in __memcpy_{to,from}io

Conflicts:
	arch/arm64/kernel/io.c

Change-Id: I992fcf368dbc672ad7d6ae31e87f289f4d7df2bf
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Srinivasarao P 2017-12-26 17:23:34 +05:30
commit 202fde333d
61 changed files with 465 additions and 263 deletions


@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 97
SUBLEVEL = 98
EXTRAVERSION =
NAME = Blurry Fish Butt


@ -221,6 +221,7 @@ CONFIG_SERIO=m
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=32
CONFIG_SERIAL_8250_RUNTIME_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y


@ -133,30 +133,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
set_fs(fs);
}
static void dump_instr(const char *lvl, struct pt_regs *regs)
static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
const int thumb = thumb_mode(regs);
const int width = thumb ? 4 : 8;
mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
/*
* We need to switch to kernel mode so that we can use __get_user
* to safely read from kernel space. Note that we now dump the
* code first, just in case the backtrace kills us.
* Note that we now dump the code first, just in case the backtrace
* kills us.
*/
fs = get_fs();
set_fs(KERNEL_DS);
for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
if (thumb)
bad = __get_user(val, &((u16 *)addr)[i]);
bad = get_user(val, &((u16 *)addr)[i]);
else
bad = __get_user(val, &((u32 *)addr)[i]);
bad = get_user(val, &((u32 *)addr)[i]);
if (!bad)
p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
@ -167,8 +163,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
}
}
printk("%sCode: %s\n", lvl, str);
}
set_fs(fs);
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
mm_segment_t fs;
if (!user_mode(regs)) {
fs = get_fs();
set_fs(KERNEL_DS);
__dump_instr(lvl, regs);
set_fs(fs);
} else {
__dump_instr(lvl, regs);
}
}
#ifdef CONFIG_ARM_UNWIND


@ -26,8 +26,7 @@
*/
void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
{
while (count && (!IS_ALIGNED((unsigned long)from, 8) ||
!IS_ALIGNED((unsigned long)to, 8))) {
while (count && !IS_ALIGNED((unsigned long)from, 8)) {
*(u8 *)to = __raw_readb_no_log(from);
from++;
to++;
@ -55,23 +54,22 @@ EXPORT_SYMBOL(__memcpy_fromio);
*/
void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
{
while (count && (!IS_ALIGNED((unsigned long)to, 8) ||
!IS_ALIGNED((unsigned long)from, 8))) {
__raw_writeb_no_log(*(volatile u8 *)from, to);
while (count && !IS_ALIGNED((unsigned long)to, 8)) {
__raw_writeb_no_log(*(u8 *)from, to);
from++;
to++;
count--;
}
while (count >= 8) {
__raw_writeq_no_log(*(volatile u64 *)from, to);
__raw_writeq_no_log(*(u64 *)from, to);
from += 8;
to += 8;
count -= 8;
}
while (count) {
__raw_writeb_no_log(*(volatile u8 *)from, to);
__raw_writeb_no_log(*(u8 *)from, to);
from++;
to++;
count--;
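
The hunk above drops the alignment requirement on the Normal-memory pointer and keeps it only for the I/O side. A rough userspace sketch of that head/bulk/tail structure (plain C stands in for the kernel's __raw_*_no_log() accessors, and memcpy() does the possibly-unaligned Normal-memory load):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

static void toio_sketch(volatile void *to, const void *from, size_t count)
{
	/* Head: byte copies until the "device" pointer is 8-byte aligned. */
	while (count && ((uintptr_t)to & 7)) {
		*(volatile uint8_t *)to = *(const uint8_t *)from;
		to = (volatile uint8_t *)to + 1;
		from = (const uint8_t *)from + 1;
		count--;
	}

	/* Bulk: 64-bit stores to the aligned side; the Normal-memory side
	 * may stay unaligned, which memcpy() tolerates here. */
	while (count >= 8) {
		uint64_t v;

		memcpy(&v, from, 8);
		*(volatile uint64_t *)to = v;
		to = (volatile uint8_t *)to + 8;
		from = (const uint8_t *)from + 8;
		count -= 8;
	}

	/* Tail: leftover bytes. */
	while (count) {
		*(volatile uint8_t *)to = *(const uint8_t *)from;
		to = (volatile uint8_t *)to + 1;
		from = (const uint8_t *)from + 1;
		count--;
	}
}

int main(void)
{
	uint64_t dst[4] = { 0 };
	const char src[] = "abcdefghijklmnopqrstuvwxyz01234";

	/* Misaligned source, aligned destination: still fine. */
	toio_sketch(dst, src + 1, sizeof(src) - 1);
	printf("%s\n", (const char *)dst);
	return 0;
}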


@ -576,6 +576,7 @@ static int __init ar7_register_uarts(void)
uart_port.type = PORT_AR7;
uart_port.uartclk = clk_get_rate(bus_clk) / 2;
uart_port.iotype = UPIO_MEM32;
uart_port.flags = UPF_FIXED_TYPE;
uart_port.regshift = 2;
uart_port.line = 0;
@ -654,6 +655,10 @@ static int __init ar7_register_devices(void)
u32 val;
int res;
res = ar7_gpio_init();
if (res)
pr_warn("unable to register gpios: %d\n", res);
res = ar7_register_uarts();
if (res)
pr_err("unable to setup uart(s): %d\n", res);


@ -246,8 +246,6 @@ void __init prom_init(void)
ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
ar7_init_env((struct env_var *)fw_arg2);
console_config();
ar7_gpio_init();
}
#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))


@ -238,8 +238,8 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
#define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15)
#define CM_GCR_BASE_CMDEFTGT_SHF 0
#define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0)
#define CM_GCR_BASE_CMDEFTGT_DISABLED 0
#define CM_GCR_BASE_CMDEFTGT_MEM 1
#define CM_GCR_BASE_CMDEFTGT_MEM 0
#define CM_GCR_BASE_CMDEFTGT_RESERVED 1
#define CM_GCR_BASE_CMDEFTGT_IOCU0 2
#define CM_GCR_BASE_CMDEFTGT_IOCU1 3


@ -49,9 +49,7 @@
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
/* What the heck is this check doing ? */
if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
play_dead();
play_dead();
}
#endif


@ -64,6 +64,9 @@ EXPORT_SYMBOL(cpu_sibling_map);
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);
/*
* A logcal cpu mask containing only one VPE per core to
* reduce the number of IPIs on large MT systems.
@ -174,9 +177,12 @@ asmlinkage void start_secondary(void)
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
cpumask_set_cpu(cpu, &cpu_callin_map);
/* Notify boot CPU that we're starting & ready to sync counters */
complete(&cpu_starting);
synchronise_count_slave(cpu);
/* The CPU is running and counters synchronised, now mark it online */
set_cpu_online(cpu, true);
set_cpu_sibling_map(cpu);
@ -184,6 +190,12 @@ asmlinkage void start_secondary(void)
calculate_cpu_foreign_map();
/*
* Notify boot CPU that we're up & online and it can safely return
* from __cpu_up
*/
complete(&cpu_running);
/*
* irq will be enabled in ->smp_finish(), enabling it too early
* is dangerous.
@ -242,22 +254,23 @@ void smp_prepare_boot_cpu(void)
{
set_cpu_possible(0, true);
set_cpu_online(0, true);
cpumask_set_cpu(0, &cpu_callin_map);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
mp_ops->boot_secondary(cpu, tidle);
/*
* Trust is futile. We should really have timeouts ...
*/
while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
udelay(100);
schedule();
/* Wait for CPU to start and be ready to sync counters */
if (!wait_for_completion_timeout(&cpu_starting,
msecs_to_jiffies(1000))) {
pr_crit("CPU%u: failed to start\n", cpu);
return -EIO;
}
synchronise_count_master(cpu);
/* Wait for CPU to finish startup & mark itself online before return */
wait_for_completion(&cpu_running);
return 0;
}
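
The change above replaces the cpu_callin_map busy-wait with two completions and a one-second timeout on CPU bring-up. A userspace analogue of that handshake, using a POSIX semaphore where the kernel uses complete()/wait_for_completion_timeout() (names and timing here are illustrative only):

#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static sem_t cpu_starting;	/* plays the role of DECLARE_COMPLETION(cpu_starting) */

static void *secondary(void *arg)
{
	(void)arg;
	/* ...early bring-up work would happen here... */
	sem_post(&cpu_starting);	/* kernel: complete(&cpu_starting) */
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec deadline;

	sem_init(&cpu_starting, 0, 0);
	pthread_create(&t, NULL, secondary, NULL);

	/* kernel: wait_for_completion_timeout(&cpu_starting, msecs_to_jiffies(1000)) */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;

	if (sem_timedwait(&cpu_starting, &deadline) != 0 && errno == ETIMEDOUT)
		fprintf(stderr, "CPU failed to start\n");
	else
		printf("CPU started\n");

	pthread_join(t, NULL);
	sem_destroy(&cpu_starting);
	return 0;
}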


@ -75,7 +75,7 @@ static struct insn insn_table_MM[] = {
{ insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
{ insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
{ insn_ld, 0, 0 },
{ insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM },
{ insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
{ insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
{ insn_lld, 0, 0 },
{ insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },


@ -83,6 +83,10 @@
};
};
sdhc@114000 {
status = "disabled";
};
i2c@119000 {
status = "disabled";
};


@ -280,6 +280,7 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
*/
if (reject && reject != XICS_IPI) {
arch_spin_unlock(&ics->lock);
icp->n_reject++;
new_irq = reject;
goto again;
}
@ -611,10 +612,8 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
state = &ics->irq_state[src];
/* Still asserted, resend it */
if (state->asserted) {
icp->n_reject++;
if (state->asserted)
icp_rm_deliver_irq(xics, icp, irq);
}
if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
icp->rm_action |= XICS_RM_NOTIFY_EOI;


@ -165,7 +165,6 @@ static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_TE | SCSCR_RE,
.type = PORT_IRDA,
.ops = &sh770x_sci_port_ops,
.regshift = 1,
};
static struct resource scif2_resources[] = {


@ -174,8 +174,8 @@ LABEL skip_ %I
.endr
# Find min length
vmovdqa _lens+0*16(state), %xmm0
vmovdqa _lens+1*16(state), %xmm1
vmovdqu _lens+0*16(state), %xmm0
vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@ -195,8 +195,8 @@ LABEL skip_ %I
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
vmovdqa %xmm0, _lens+0*16(state)
vmovdqa %xmm1, _lens+1*16(state)
vmovdqu %xmm0, _lens+0*16(state)
vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@ -260,8 +260,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
jc .return_null
# Find min length
vmovdqa _lens(state), %xmm0
vmovdqa _lens+1*16(state), %xmm1
vmovdqu _lens(state), %xmm0
vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}


@ -7,6 +7,7 @@
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <linux/preempt.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
@ -66,6 +67,12 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ() WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif
/**
* access_ok: - Checks if a user space pointer is valid
* @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
@ -86,8 +93,11 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
#define access_ok(type, addr, size) \
likely(!__range_not_ok(addr, size, user_addr_max()))
#define access_ok(type, addr, size) \
({ \
WARN_ON_IN_IRQ(); \
likely(!__range_not_ok(addr, size, user_addr_max())); \
})
/*
* The exception table consists of pairs of addresses relative to the


@ -212,8 +212,8 @@ static void arch_perfmon_setup_counters(void)
eax.full = cpuid_eax(0xa);
/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
__this_cpu_read(cpu_info.x86_model) == 15) {
if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model == 15) {
eax.split.version_id = 2;
eax.split.num_counters = 2;
eax.split.bit_width = 40;


@ -87,7 +87,7 @@ EXPORT_SYMBOL_GPL(pkcs7_free_message);
static int pkcs7_check_authattrs(struct pkcs7_message *msg)
{
struct pkcs7_signed_info *sinfo;
bool want;
bool want = false;
sinfo = msg->signed_infos;
if (!sinfo)


@ -833,7 +833,7 @@ binder_enqueue_work_ilocked(struct binder_work *work,
}
/**
* binder_enqueue_thread_work_ilocked_nowake() - Add thread work
* binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
* @thread: thread to queue work to
* @work: struct binder_work to add to list
*
@ -844,8 +844,8 @@ binder_enqueue_work_ilocked(struct binder_work *work,
* Requires the proc->inner_lock to be held.
*/
static void
binder_enqueue_thread_work_ilocked_nowake(struct binder_thread *thread,
struct binder_work *work)
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
struct binder_work *work)
{
binder_enqueue_work_ilocked(work, &thread->todo);
}
@ -3348,7 +3348,14 @@ static void binder_transaction(struct binder_proc *proc,
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_inner_proc_lock(proc);
binder_enqueue_thread_work_ilocked_nowake(thread, tcomplete);
/*
* Defer the TRANSACTION_COMPLETE, so we don't return to
* userspace immediately; this allows the target process to
* immediately start processing this transaction, reducing
* latency. We will then return the TRANSACTION_COMPLETE when
* the target replies (or there is an error).
*/
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;


@ -2736,7 +2736,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
* from the parent.
*/
page_count = (u32)calc_pages_for(0, length);
pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
result = PTR_ERR(pages);
pages = NULL;
@ -2863,7 +2863,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
*/
size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
page_count = (u32)calc_pages_for(0, size);
pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages))
return PTR_ERR(pages);


@ -80,11 +80,13 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
int ret;
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
pagefault_enable();
preempt_enable();
ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
return ret;
@ -99,11 +101,13 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
enable_kernel_vsx();
aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
pagefault_enable();
preempt_enable();
crypto_xor(keystream, src, nbytes);
memcpy(dst, keystream, nbytes);
@ -132,6 +136,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
enable_kernel_vsx();
@ -143,6 +148,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
&ctx->enc_key,
walk.iv);
pagefault_enable();
preempt_enable();
/* We need to update IV mostly for last bytes/round */
inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;


@ -312,7 +312,7 @@ static int drm_minor_register(struct drm_device *dev, unsigned int type)
ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
return ret;
goto err_debugfs;
}
ret = device_add(minor->kdev);


@ -708,7 +708,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
* allocation taken by fbdev
*/
if (!(dev_priv->capabilities & SVGA_CAP_3D))
mem_size *= 2;
mem_size *= 3;
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem =


@ -58,7 +58,7 @@ static int iio_interrupt_trigger_probe(struct platform_device *pdev)
trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
if (!trig_info) {
ret = -ENOMEM;
goto error_put_trigger;
goto error_free_trigger;
}
iio_trigger_set_drvdata(trig, trig_info);
trig_info->irq = irq;
@ -83,8 +83,8 @@ error_release_irq:
free_irq(irq, trig);
error_free_trig_info:
kfree(trig_info);
error_put_trigger:
iio_trigger_put(trig);
error_free_trigger:
iio_trigger_free(trig);
error_ret:
return ret;
}
@ -99,7 +99,7 @@ static int iio_interrupt_trigger_remove(struct platform_device *pdev)
iio_trigger_unregister(trig);
free_irq(trig_info->irq, trig);
kfree(trig_info);
iio_trigger_put(trig);
iio_trigger_free(trig);
return 0;
}


@ -174,7 +174,7 @@ static int iio_sysfs_trigger_probe(int id)
return 0;
out2:
iio_trigger_put(t->trig);
iio_trigger_free(t->trig);
free_t:
kfree(t);
out1:


@ -1373,7 +1373,7 @@ static void ipoib_cm_tx_reap(struct work_struct *work)
while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
list_del(&p->list);
list_del_init(&p->list);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
ipoib_cm_tx_destroy(p);


@ -87,7 +87,8 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
struct mpr121_touchkey *mpr121 = dev_id;
struct i2c_client *client = mpr121->client;
struct input_dev *input = mpr121->input_dev;
unsigned int key_num, key_val, pressed;
unsigned long bit_changed;
unsigned int key_num;
int reg;
reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR);
@ -105,19 +106,23 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
reg &= TOUCH_STATUS_MASK;
/* use old press bit to figure out which bit changed */
key_num = ffs(reg ^ mpr121->statusbits) - 1;
pressed = reg & (1 << key_num);
bit_changed = reg ^ mpr121->statusbits;
mpr121->statusbits = reg;
for_each_set_bit(key_num, &bit_changed, mpr121->keycount) {
unsigned int key_val, pressed;
key_val = mpr121->keycodes[key_num];
pressed = reg & BIT(key_num);
key_val = mpr121->keycodes[key_num];
input_event(input, EV_MSC, MSC_SCAN, key_num);
input_report_key(input, key_val, pressed);
input_event(input, EV_MSC, MSC_SCAN, key_num);
input_report_key(input, key_val, pressed);
dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
pressed ? "pressed" : "released");
}
input_sync(input);
dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
pressed ? "pressed" : "released");
out:
return IRQ_HANDLED;
}
@ -231,6 +236,7 @@ static int mpr_touchkey_probe(struct i2c_client *client,
input_dev->id.bustype = BUS_I2C;
input_dev->dev.parent = &client->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_dev->keycode = mpr121->keycodes;
input_dev->keycodesize = sizeof(mpr121->keycodes[0]);
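
The driver now XORs the new status register against the cached one and walks every changed bit, instead of reporting only the lowest changed key. A small userspace sketch of that changed-bits pattern (a ctz loop standing in for the kernel's for_each_set_bit()):

#include <stdint.h>
#include <stdio.h>

/* Report every key whose bit changed between two reads of the touch
 * status register, the way the interrupt handler above now does with
 * reg ^ mpr121->statusbits. */
static void report_changes(uint16_t old_status, uint16_t new_status)
{
	uint16_t changed = old_status ^ new_status;

	while (changed) {
		unsigned int key = __builtin_ctz(changed);	/* lowest changed bit */
		int pressed = !!(new_status & (1u << key));

		printf("key %u %s\n", key, pressed ? "pressed" : "released");
		changed &= changed - 1;				/* clear that bit */
	}
}

int main(void)
{
	/* Keys 0 and 3 go down while key 5 comes up in a single interrupt. */
	report_changes(0x0020, 0x0009);
	return 0;
}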


@ -1240,6 +1240,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
{ "ELAN060C", 0 },
{ "ELAN0611", 0 },
{ "ELAN1000", 0 },
{ }


@ -1033,13 +1033,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
}
}
/* Nuke the existing Config, as we're going to rewrite it */
val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
if (ste->valid)
val |= STRTAB_STE_0_V;
else
val &= ~STRTAB_STE_0_V;
/* Nuke the existing STE_0 value, as we're going to rewrite it */
val = ste->valid ? STRTAB_STE_0_V : 0;
if (ste->bypass) {
val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
@ -1068,7 +1063,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
STRTAB_STE_0_CFG_S1_TRANS;
}
if (ste->s2_cfg) {


@ -2856,6 +2856,9 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
state->pdata.alt_data_sat = 1;
state->pdata.op_format_mode_sel = ADV7604_OP_FORMAT_MODE0;
state->pdata.bus_order = ADV7604_BUS_ORDER_RGB;
state->pdata.dr_str_data = ADV76XX_DR_STR_MEDIUM_HIGH;
state->pdata.dr_str_clk = ADV76XX_DR_STR_MEDIUM_HIGH;
state->pdata.dr_str_sync = ADV76XX_DR_STR_MEDIUM_HIGH;
return 0;
}


@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
break;
default:
ret = -EINVAL;


@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
priv->read_reg32 = d_can_plat_read_reg32;


@ -539,6 +539,13 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
}
stats->rx_over_errors++;
stats->rx_errors++;
/* reset the CAN IP by entering reset mode
* ignoring timeout error
*/
set_reset_mode(dev);
set_normal_mode(dev);
/* clear bit */
sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
}
@ -653,8 +660,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
}
if (isrc & SUN4I_INT_RBUF_VLD) {
/* receive interrupt */
if ((isrc & SUN4I_INT_RBUF_VLD) &&
!(isrc & SUN4I_INT_DATA_OR)) {
/* receive interrupt - don't read if overrun occurred */
while (status & SUN4I_STA_RBUF_RDY) {
/* RX buffer is not empty */
sun4i_can_rx(dev);


@ -724,8 +724,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
u8 *buf;
int len;
int temp;
int err;
u8 iface_no;
struct usb_cdc_parsed_header hdr;
u16 curr_ntb_format;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@ -823,6 +825,32 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
goto error2;
}
/*
* Some Huawei devices have been observed to come out of reset in NDP32 mode.
* Let's check if this is the case, and set the device to NDP16 mode again if
* needed.
*/
if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) {
err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
0, iface_no, &curr_ntb_format, 2);
if (err < 0) {
goto error2;
}
if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
dev_info(&intf->dev, "resetting NTB format to 16-bit");
err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
USB_TYPE_CLASS | USB_DIR_OUT
| USB_RECIP_INTERFACE,
USB_CDC_NCM_NTB16_FORMAT,
iface_no, NULL, 0);
if (err < 0)
goto error2;
}
}
cdc_ncm_find_endpoints(dev, ctx->data);
cdc_ncm_find_endpoints(dev, ctx->control);
if (!dev->in || !dev->out || !dev->status) {


@ -80,6 +80,12 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
* be at the end of the frame.
*/
drvflags |= CDC_NCM_FLAG_NDP_TO_END;
/* Additionally, it has been reported that some Huawei E3372H devices, with
* firmware version 21.318.01.00.541, come out of reset in NTB32 format mode, hence
* needing to be set to the NTB16 one again.
*/
drvflags |= CDC_NCM_FLAG_RESET_NTB16;
ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;


@ -67,6 +67,7 @@ module_param(rx_drain_timeout_msecs, uint, 0444);
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);
#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
@ -2157,11 +2158,12 @@ static int __init netback_init(void)
if (!xen_domain())
return -ENODEV;
/* Allow as many queues as there are CPUs if user has not
/* Allow as many queues as there are CPUs but max. 8 if user has not
* specified a value.
*/
if (xenvif_max_queues == 0)
xenvif_max_queues = num_online_cpus();
xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
num_online_cpus());
if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",


@ -131,6 +131,12 @@ struct mvebu_pcie {
int nports;
};
struct mvebu_pcie_window {
phys_addr_t base;
phys_addr_t remap;
size_t size;
};
/* Structure representing one PCIe interface */
struct mvebu_pcie_port {
char *name;
@ -148,10 +154,8 @@ struct mvebu_pcie_port {
struct mvebu_sw_pci_bridge bridge;
struct device_node *dn;
struct mvebu_pcie *pcie;
phys_addr_t memwin_base;
size_t memwin_size;
phys_addr_t iowin_base;
size_t iowin_size;
struct mvebu_pcie_window memwin;
struct mvebu_pcie_window iowin;
u32 saved_pcie_stat;
};
@ -377,23 +381,45 @@ static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
}
}
static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
unsigned int target, unsigned int attribute,
const struct mvebu_pcie_window *desired,
struct mvebu_pcie_window *cur)
{
if (desired->base == cur->base && desired->remap == cur->remap &&
desired->size == cur->size)
return;
if (cur->size != 0) {
mvebu_pcie_del_windows(port, cur->base, cur->size);
cur->size = 0;
cur->base = 0;
/*
* If something tries to change the window while it is enabled
* the change will not be done atomically. That would be
* difficult to do in the general case.
*/
}
if (desired->size == 0)
return;
mvebu_pcie_add_windows(port, target, attribute, desired->base,
desired->size, desired->remap);
*cur = *desired;
}
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
phys_addr_t iobase;
struct mvebu_pcie_window desired = {};
/* Are the new iobase/iolimit values invalid? */
if (port->bridge.iolimit < port->bridge.iobase ||
port->bridge.iolimitupper < port->bridge.iobaseupper ||
!(port->bridge.command & PCI_COMMAND_IO)) {
/* If a window was configured, remove it */
if (port->iowin_base) {
mvebu_pcie_del_windows(port, port->iowin_base,
port->iowin_size);
port->iowin_base = 0;
port->iowin_size = 0;
}
mvebu_pcie_set_window(port, port->io_target, port->io_attr,
&desired, &port->iowin);
return;
}
@ -410,32 +436,27 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
* specifications. iobase is the bus address, port->iowin_base
* is the CPU address.
*/
iobase = ((port->bridge.iobase & 0xF0) << 8) |
(port->bridge.iobaseupper << 16);
port->iowin_base = port->pcie->io.start + iobase;
port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
(port->bridge.iolimitupper << 16)) -
iobase) + 1;
desired.remap = ((port->bridge.iobase & 0xF0) << 8) |
(port->bridge.iobaseupper << 16);
desired.base = port->pcie->io.start + desired.remap;
desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
(port->bridge.iolimitupper << 16)) -
desired.remap) +
1;
mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
port->iowin_base, port->iowin_size,
iobase);
mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
&port->iowin);
}
static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
/* Are the new membase/memlimit values invalid? */
if (port->bridge.memlimit < port->bridge.membase ||
!(port->bridge.command & PCI_COMMAND_MEMORY)) {
/* If a window was configured, remove it */
if (port->memwin_base) {
mvebu_pcie_del_windows(port, port->memwin_base,
port->memwin_size);
port->memwin_base = 0;
port->memwin_size = 0;
}
mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
&desired, &port->memwin);
return;
}
@ -445,14 +466,12 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
* window to setup, according to the PCI-to-PCI bridge
* specifications.
*/
port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16);
port->memwin_size =
(((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
port->memwin_base + 1;
desired.base = ((port->bridge.membase & 0xFFF0) << 16);
desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
desired.base + 1;
mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
port->memwin_base, port->memwin_size,
MVEBU_MBUS_NO_REMAP);
mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
&port->memwin);
}
/*


@ -249,7 +249,7 @@ static int hp_wmi_display_state(void)
int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
return -EINVAL;
return ret < 0 ? ret : -EINVAL;
return state;
}
@ -259,7 +259,7 @@ static int hp_wmi_hddtemp_state(void)
int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
return -EINVAL;
return ret < 0 ? ret : -EINVAL;
return state;
}
@ -269,7 +269,7 @@ static int hp_wmi_als_state(void)
int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
return -EINVAL;
return ret < 0 ? ret : -EINVAL;
return state;
}
@ -280,7 +280,7 @@ static int hp_wmi_dock_state(void)
sizeof(state), sizeof(state));
if (ret)
return -EINVAL;
return ret < 0 ? ret : -EINVAL;
return state & 0x1;
}
@ -291,7 +291,7 @@ static int hp_wmi_tablet_state(void)
int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
return ret;
return ret < 0 ? ret : -EINVAL;
return (state & 0x4) ? 1 : 0;
}
@ -324,7 +324,7 @@ static int __init hp_wmi_enable_hotkeys(void)
int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value,
sizeof(value), 0);
if (ret)
return -EINVAL;
return ret < 0 ? ret : -EINVAL;
return 0;
}
@ -337,7 +337,7 @@ static int hp_wmi_set_block(void *data, bool blocked)
ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
&query, sizeof(query), 0);
if (ret)
return -EINVAL;
return ret < 0 ? ret : -EINVAL;
return 0;
}
@ -429,7 +429,7 @@ static int hp_wmi_post_code_state(void)
int ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
return -EINVAL;
return ret < 0 ? ret : -EINVAL;
return state;
}
@ -495,7 +495,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr,
int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
sizeof(tmp), sizeof(tmp));
if (ret)
return -EINVAL;
return ret < 0 ? ret : -EINVAL;
return count;
}
@ -516,7 +516,7 @@ static ssize_t set_postcode(struct device *dev, struct device_attribute *attr,
ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 1, &tmp,
sizeof(tmp), sizeof(tmp));
if (ret)
return -EINVAL;
return ret < 0 ? ret : -EINVAL;
return count;
}
@ -573,10 +573,12 @@ static void hp_wmi_notify(u32 value, void *context)
switch (event_id) {
case HPWMI_DOCK_EVENT:
input_report_switch(hp_wmi_input_dev, SW_DOCK,
hp_wmi_dock_state());
input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
hp_wmi_tablet_state());
if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
input_report_switch(hp_wmi_input_dev, SW_DOCK,
hp_wmi_dock_state());
if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
break;
case HPWMI_PARK_HDD:
@ -649,6 +651,7 @@ static int __init hp_wmi_input_setup(void)
{
acpi_status status;
int err;
int val;
hp_wmi_input_dev = input_allocate_device();
if (!hp_wmi_input_dev)
@ -659,17 +662,26 @@ static int __init hp_wmi_input_setup(void)
hp_wmi_input_dev->id.bustype = BUS_HOST;
__set_bit(EV_SW, hp_wmi_input_dev->evbit);
__set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
/* Dock */
val = hp_wmi_dock_state();
if (!(val < 0)) {
__set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
input_report_switch(hp_wmi_input_dev, SW_DOCK, val);
}
/* Tablet mode */
val = hp_wmi_tablet_state();
if (!(val < 0)) {
__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
}
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
if (err)
goto err_free_dev;
/* Set initial hardware state */
input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
@ -982,10 +994,12 @@ static int hp_wmi_resume_handler(struct device *device)
* changed.
*/
if (hp_wmi_input_dev) {
input_report_switch(hp_wmi_input_dev, SW_DOCK,
hp_wmi_dock_state());
input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
hp_wmi_tablet_state());
if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
input_report_switch(hp_wmi_input_dev, SW_DOCK,
hp_wmi_dock_state());
if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
}
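
hp_wmi_perform_query() can fail two ways: a negative errno from the WMI transport or a positive HP-specific error code from the firmware. The pattern adopted above keeps the errno and maps positive codes to -EINVAL; a toy illustration (the query numbers and codes below are invented, not the driver's real ones):

#include <errno.h>
#include <stdio.h>

/* Toy stand-in for hp_wmi_perform_query(): 0 on success, a negative
 * errno on transport failure, or a positive firmware error code. */
static int perform_query(int query, int *state)
{
	switch (query) {
	case 1: *state = 0x5; return 0;		/* success          */
	case 2: return -ENODEV;			/* transport errno  */
	default: return 3;			/* firmware code    */
	}
}

static int query_state(int query)
{
	int state = 0;
	int ret = perform_query(query, &state);

	if (ret)
		return ret < 0 ? ret : -EINVAL;	/* keep errnos, map the rest */
	return state;
}

int main(void)
{
	printf("%d %d %d\n", query_state(1), query_state(2), query_state(3));
	return 0;
}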


@ -909,7 +909,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
int qeth_core_hardsetup_card(struct qeth_card *);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
int qeth_send_startlan(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),


@ -2955,7 +2955,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
int qeth_send_startlan(struct qeth_card *card)
static int qeth_send_startlan(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@ -2968,7 +2968,6 @@ int qeth_send_startlan(struct qeth_card *card)
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_startlan);
static int qeth_default_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@ -5080,6 +5079,20 @@ retriable:
goto out;
}
rc = qeth_send_startlan(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
if (rc == IPA_RC_LAN_OFFLINE) {
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
card->lan_online = 0;
} else {
rc = -ENODEV;
goto out;
}
} else
card->lan_online = 1;
card->options.ipa4.supported_funcs = 0;
card->options.ipa6.supported_funcs = 0;
card->options.adp.supported_funcs = 0;
@ -5091,14 +5104,14 @@ retriable:
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc < 0) {
QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
goto out;
}
}
if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
rc = qeth_query_setdiagass(card);
if (rc < 0) {
QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
goto out;
}
}


@ -1203,21 +1203,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
rc = qeth_send_startlan(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
if (rc == 0xe080) {
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
card->lan_online = 0;
goto contin;
}
rc = -ENODEV;
goto out_remove;
} else
card->lan_online = 1;
contin:
if ((card->info.type == QETH_CARD_TYPE_OSD) ||
(card->info.type == QETH_CARD_TYPE_OSX)) {
if (qeth_l2_start_ipassists(card))


@ -3298,21 +3298,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
rc = qeth_send_startlan(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
if (rc == 0xe080) {
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
card->lan_online = 0;
goto contin;
}
rc = -ENODEV;
goto out_remove;
} else
card->lan_online = 1;
contin:
rc = qeth_l3_setadapter_parms(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);


@ -259,7 +259,7 @@ out_free_irq:
out1:
iio_trigger_unregister(st->trig);
out:
iio_trigger_put(st->trig);
iio_trigger_free(st->trig);
return ret;
}
@ -272,7 +272,7 @@ static int iio_bfin_tmr_trigger_remove(struct platform_device *pdev)
peripheral_free(st->t->pin);
free_irq(st->irq, st);
iio_trigger_unregister(st->trig);
iio_trigger_put(st->trig);
iio_trigger_free(st->trig);
return 0;
}


@ -163,18 +163,17 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
},
/*
* Common definitions for legacy IrDA ports, dependent on
* regshift value.
* Common definitions for legacy IrDA ports.
*/
[SCIx_IRDA_REGTYPE] = {
[SCSMR] = { 0x00, 8 },
[SCBRR] = { 0x01, 8 },
[SCSCR] = { 0x02, 8 },
[SCxTDR] = { 0x03, 8 },
[SCxSR] = { 0x04, 8 },
[SCxRDR] = { 0x05, 8 },
[SCFCR] = { 0x06, 8 },
[SCFDR] = { 0x07, 16 },
[SCBRR] = { 0x02, 8 },
[SCSCR] = { 0x04, 8 },
[SCxTDR] = { 0x06, 8 },
[SCxSR] = { 0x08, 16 },
[SCxRDR] = { 0x0a, 8 },
[SCFCR] = { 0x0c, 8 },
[SCFDR] = { 0x0e, 16 },
[SCTFDR] = sci_reg_invalid,
[SCRFDR] = sci_reg_invalid,
[SCSPTR] = sci_reg_invalid,


@ -3057,6 +3057,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
}
usb_put_invalidate_rhdev(hcd);
hcd->flags = 0;
}
EXPORT_SYMBOL_GPL(usb_remove_hcd);


@ -129,7 +129,7 @@ static struct fb_ops pmagbafb_ops = {
/*
* Turn the hardware cursor off.
*/
static void __init pmagbafb_erase_cursor(struct fb_info *info)
static void pmagbafb_erase_cursor(struct fb_info *info)
{
struct pmagbafb_par *par = info->par;


@ -136,11 +136,7 @@ static inline const char *phy_modes(phy_interface_t interface)
/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
#define PHY_ID_FMT "%s:%02x"
/*
* Need to be a little smaller than phydev->dev.bus_id to leave room
* for the ":%02x"
*/
#define MII_BUS_ID_SIZE (20 - 3)
#define MII_BUS_ID_SIZE 61
/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
@ -599,7 +595,7 @@ struct phy_driver {
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
char bus_id[20];
char bus_id[MII_BUS_ID_SIZE + 3];
u32 phy_uid;
u32 phy_uid_mask;
int (*run)(struct phy_device *phydev);


@ -65,19 +65,24 @@
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
* in_softirq - Are we currently processing softirq or have bh disabled?
* in_serving_softirq - Are we currently processing softirq?
*
* in_irq() - We're in (hard) IRQ context
* in_softirq() - We have BH disabled, or are processing softirqs
* in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
* in_serving_softirq() - We're in softirq context
* in_nmi() - We're in NMI context
* in_task() - We're in task context
*
* Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
* should not be used in new code.
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
/*
* Are we in NMI context?
*/
#define in_nmi() (preempt_count() & NMI_MASK)
#define in_nmi() (preempt_count() & NMI_MASK)
#define in_task() (!(preempt_count() & \
(NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
/*
* The preempt_count offset after preempt_disable();


@ -82,6 +82,7 @@
/* Driver flags */
#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)


@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t;
#define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200
/* max delivery path length */
#define SNDRV_SEQ_MAX_HOPS 10
/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */
#define SNDRV_SEQ_MAX_HOPS 8
/* max size of event size */
#define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff


@ -9,6 +9,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
struct worker_pool;
@ -59,7 +60,7 @@ struct worker {
*/
static inline struct worker *current_wq_worker(void)
{
if (current->flags & PF_WQ_WORKER)
if (in_task() && (current->flags & PF_WQ_WORKER))
return kthread_data(current);
return NULL;
}


@ -227,7 +227,7 @@ next_op:
hdr = 2;
/* Extract a tag from the data */
if (unlikely(dp >= datalen - 1))
if (unlikely(datalen - dp < 2))
goto data_overrun_error;
tag = data[dp++];
if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
@ -273,7 +273,7 @@ next_op:
int n = len - 0x80;
if (unlikely(n > 2))
goto length_too_long;
if (unlikely(dp >= datalen - n))
if (unlikely(n > datalen - dp))
goto data_overrun_error;
hdr += n;
for (len = 0; n > 0; n--) {
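
The rewritten checks avoid unsigned wrap-around: with size_t operands, datalen - 1 becomes a huge value when datalen is 0, so the old test could never fire. A short demonstration:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t dp = 0, datalen = 0;	/* e.g. an empty or truncated blob */

	/* Old form: datalen - 1 wraps to SIZE_MAX, so the guard never fires. */
	printf("old check fires: %d\n", dp >= datalen - 1);

	/* New form: safe as long as dp <= datalen, which the decoder ensures. */
	printf("new check fires: %d\n", datalen - dp < 2);
	return 0;
}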


@ -65,14 +65,19 @@ static ssize_t trigger_request_store(struct device *dev,
release_firmware(test_firmware);
test_firmware = NULL;
rc = request_firmware(&test_firmware, name, dev);
if (rc)
if (rc) {
pr_info("load of '%s' failed: %d\n", name, rc);
pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0);
goto out;
}
pr_info("loaded: %zu\n", test_firmware->size);
rc = count;
out:
mutex_unlock(&test_fw_mutex);
kfree(name);
return count;
return rc;
}
static DEVICE_ATTR_WO(trigger_request);


@ -1,12 +1,13 @@
config HAVE_NET_DSA
def_bool y
depends on NETDEVICES && !S390
depends on INET && NETDEVICES && !S390
# Drivers must select NET_DSA and the appropriate tagging format
config NET_DSA
tristate "Distributed Switch Architecture"
depends on HAVE_NET_DSA && NET_SWITCHDEV
depends on HAVE_NET_DSA
select NET_SWITCHDEV
select PHYLIB
---help---
Say Y if you want to enable support for the hardware switches supported


@ -270,6 +270,9 @@ static void ah_input_done(struct crypto_async_request *base, int err)
int ihl = ip_hdrlen(skb);
int ah_hlen = (ah->hdrlen + 2) << 2;
if (err)
goto out;
work_iph = AH_SKB_CB(skb)->tmp;
auth_data = ah_tmp_auth(work_iph, ihl);
icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);


@ -151,8 +151,34 @@ void nft_meta_get_eval(const struct nft_expr *expr,
else
*dest = PACKET_BROADCAST;
break;
case NFPROTO_NETDEV:
switch (skb->protocol) {
case htons(ETH_P_IP): {
int noff = skb_network_offset(skb);
struct iphdr *iph, _iph;
iph = skb_header_pointer(skb, noff,
sizeof(_iph), &_iph);
if (!iph)
goto err;
if (ipv4_is_multicast(iph->daddr))
*dest = PACKET_MULTICAST;
else
*dest = PACKET_BROADCAST;
break;
}
case htons(ETH_P_IPV6):
*dest = PACKET_MULTICAST;
break;
default:
WARN_ON_ONCE(1);
goto err;
}
break;
default:
WARN_ON(1);
WARN_ON_ONCE(1);
goto err;
}
break;
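
For loopback packets in the netdev family, the new branch classifies IPv4 destinations with ipv4_is_multicast(), i.e. the 224.0.0.0/4 range. The same test in a standalone program:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Same test as the kernel's ipv4_is_multicast(): true when the
 * destination falls in 224.0.0.0/4, i.e. the top nibble is 0xe. */
static int is_multicast(uint32_t daddr_be)
{
	return (daddr_be & htonl(0xf0000000)) == htonl(0xe0000000);
}

int main(void)
{
	struct in_addr mcast, ucast;

	inet_pton(AF_INET, "239.255.255.250", &mcast);
	inet_pton(AF_INET, "192.168.1.10", &ucast);

	printf("239.255.255.250 -> %s\n",
	       is_multicast(mcast.s_addr) ? "PACKET_MULTICAST" : "PACKET_BROADCAST");
	printf("192.168.1.10    -> %s\n",
	       is_multicast(ucast.s_addr) ? "PACKET_MULTICAST" : "PACKET_BROADCAST");
	return 0;
}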


@ -69,7 +69,7 @@ static int TSS_sha1(const unsigned char *data, unsigned int datalen,
}
ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
kfree(sdesc);
kzfree(sdesc);
return ret;
}
@ -113,7 +113,7 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
if (!ret)
ret = crypto_shash_final(&sdesc->shash, digest);
out:
kfree(sdesc);
kzfree(sdesc);
return ret;
}
@ -164,7 +164,7 @@ static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
paramdigest, TPM_NONCE_SIZE, h1,
TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
out:
kfree(sdesc);
kzfree(sdesc);
return ret;
}
@ -245,7 +245,7 @@ static int TSS_checkhmac1(unsigned char *buffer,
if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
ret = -EINVAL;
out:
kfree(sdesc);
kzfree(sdesc);
return ret;
}
@ -346,7 +346,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
ret = -EINVAL;
out:
kfree(sdesc);
kzfree(sdesc);
return ret;
}
@ -563,7 +563,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
*bloblen = storedsize;
}
out:
kfree(td);
kzfree(td);
return ret;
}
@ -677,7 +677,7 @@ static int key_seal(struct trusted_key_payload *p,
if (ret < 0)
pr_info("trusted_key: srkseal failed (%d)\n", ret);
kfree(tb);
kzfree(tb);
return ret;
}
@ -702,7 +702,7 @@ static int key_unseal(struct trusted_key_payload *p,
/* pull migratable flag out of sealed key */
p->migratable = p->key[--p->key_len];
kfree(tb);
kzfree(tb);
return ret;
}
@ -984,12 +984,12 @@ static int trusted_instantiate(struct key *key,
if (!ret && options->pcrlock)
ret = pcrlock(options->pcrlock);
out:
kfree(datablob);
kfree(options);
kzfree(datablob);
kzfree(options);
if (!ret)
rcu_assign_keypointer(key, payload);
else
kfree(payload);
kzfree(payload);
return ret;
}
@ -998,8 +998,7 @@ static void trusted_rcu_free(struct rcu_head *rcu)
struct trusted_key_payload *p;
p = container_of(rcu, struct trusted_key_payload, rcu);
memset(p->key, 0, p->key_len);
kfree(p);
kzfree(p);
}
/*
@ -1041,13 +1040,13 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
ret = datablob_parse(datablob, new_p, new_o);
if (ret != Opt_update) {
ret = -EINVAL;
kfree(new_p);
kzfree(new_p);
goto out;
}
if (!new_o->keyhandle) {
ret = -EINVAL;
kfree(new_p);
kzfree(new_p);
goto out;
}
@ -1061,22 +1060,22 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
ret = key_seal(new_p, new_o);
if (ret < 0) {
pr_info("trusted_key: key_seal failed (%d)\n", ret);
kfree(new_p);
kzfree(new_p);
goto out;
}
if (new_o->pcrlock) {
ret = pcrlock(new_o->pcrlock);
if (ret < 0) {
pr_info("trusted_key: pcrlock failed (%d)\n", ret);
kfree(new_p);
kzfree(new_p);
goto out;
}
}
rcu_assign_keypointer(key, new_p);
call_rcu(&p->rcu, trusted_rcu_free);
out:
kfree(datablob);
kfree(new_o);
kzfree(datablob);
kzfree(new_o);
return ret;
}
@ -1095,34 +1094,30 @@ static long trusted_read(const struct key *key, char __user *buffer,
p = rcu_dereference_key(key);
if (!p)
return -EINVAL;
if (!buffer || buflen <= 0)
return 2 * p->blob_len;
ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
if (!ascii_buf)
return -ENOMEM;
bufp = ascii_buf;
for (i = 0; i < p->blob_len; i++)
bufp = hex_byte_pack(bufp, p->blob[i]);
if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
kfree(ascii_buf);
return -EFAULT;
if (buffer && buflen >= 2 * p->blob_len) {
ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
if (!ascii_buf)
return -ENOMEM;
bufp = ascii_buf;
for (i = 0; i < p->blob_len; i++)
bufp = hex_byte_pack(bufp, p->blob[i]);
if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
kzfree(ascii_buf);
return -EFAULT;
}
kzfree(ascii_buf);
}
kfree(ascii_buf);
return 2 * p->blob_len;
}
/*
* trusted_destroy - before freeing the key, clear the decrypted data
* trusted_destroy - clear and free the key's payload
*/
static void trusted_destroy(struct key *key)
{
struct trusted_key_payload *p = key->payload.data[0];
if (!p)
return;
memset(p->key, 0, p->key_len);
kfree(key->payload.data[0]);
kzfree(key->payload.data[0]);
}
struct key_type key_type_trusted = {
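
Switching kfree() to kzfree() wipes buffers that held key material before they go back to the allocator. A userspace sketch of the same idea (kzfree() is kernel-only; explicit_bzero() from glibc >= 2.25 is assumed here, and the caller passes the length that kzfree() would obtain from ksize()):

#include <stdlib.h>
#include <string.h>

/* What kzfree() buys over kfree(): wipe the buffer before giving it
 * back, so key material does not linger in freed heap memory.
 * explicit_bzero() resists being optimized away. */
static void zfree(void *p, size_t len)
{
	if (!p)
		return;
	explicit_bzero(p, len);
	free(p);
}

int main(void)
{
	char *blob = malloc(32);

	if (!blob)
		return 1;
	memcpy(blob, "super-secret-key-material-here!", 32);
	zfree(blob, 32);	/* plain free() would leave the bytes behind */
	return 0;
}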


@ -612,9 +612,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq
if (!dp->timer->running)
len = snd_seq_oss_timer_start(dp->timer);
if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
snd_seq_oss_readq_puts(dp->readq, mdev->seq_device,
ev->data.ext.ptr, ev->data.ext.len);
snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
} else {
len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
if (len > 0)


@ -117,6 +117,35 @@ snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, in
return 0;
}
/*
* put MIDI sysex bytes; the event buffer may be chained, thus it has
* to be expanded via snd_seq_dump_var_event().
*/
struct readq_sysex_ctx {
struct seq_oss_readq *readq;
int dev;
};
static int readq_dump_sysex(void *ptr, void *buf, int count)
{
struct readq_sysex_ctx *ctx = ptr;
return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count);
}
int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
struct snd_seq_event *ev)
{
struct readq_sysex_ctx ctx = {
.readq = q,
.dev = dev
};
if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
return 0;
return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx);
}
/*
* copy an event to input queue:
* return zero if enqueued
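
snd_seq_oss_readq_sysex() feeds possibly chained sysex data through snd_seq_dump_var_event() with a small context struct and a per-chunk callback. A standalone sketch of that callback-with-context pattern (all names below are illustrative, not the ALSA API):

#include <stdio.h>

/* A generic walker hands each chunk of a chained buffer to a callback,
 * and per-call state travels in a small context struct, the way
 * readq_sysex_ctx does above. */
struct chunk {
	const unsigned char *buf;
	int len;
	const struct chunk *next;
};

struct dump_ctx {
	int dev;
	int total;
};

static int dump_cb(void *ptr, const void *buf, int count)
{
	struct dump_ctx *ctx = ptr;

	(void)buf;
	ctx->total += count;
	printf("dev %d: chunk of %d bytes\n", ctx->dev, count);
	return 0;
}

static int dump_var_event(const struct chunk *c,
			  int (*fn)(void *ptr, const void *buf, int count),
			  void *ptr)
{
	for (; c; c = c->next) {
		int err = fn(ptr, c->buf, c->len);

		if (err < 0)
			return err;
	}
	return 0;
}

int main(void)
{
	const unsigned char a[] = { 0xf0, 0x43 }, b[] = { 0x12, 0xf7 };
	const struct chunk c2 = { b, 2, NULL };
	const struct chunk c1 = { a, 2, &c2 };
	struct dump_ctx ctx = { .dev = 0, .total = 0 };

	dump_var_event(&c1, dump_cb, &ctx);
	printf("total %d bytes\n", ctx.total);
	return 0;
}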


@ -44,6 +44,8 @@ void snd_seq_oss_readq_delete(struct seq_oss_readq *q);
void snd_seq_oss_readq_clear(struct seq_oss_readq *readq);
unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait);
int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len);
int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
struct snd_seq_event *ev);
int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev);
int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode);
int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);


@ -48,8 +48,16 @@ echo "ABCD0123" >"$FW"
NAME=$(basename "$FW")
if printf '\000' >"$DIR"/trigger_request 2> /dev/null; then
echo "$0: empty filename should not succeed" >&2
exit 1
fi
# Request a firmware that doesn't exist, it should fail.
echo -n "nope-$NAME" >"$DIR"/trigger_request
if echo -n "nope-$NAME" >"$DIR"/trigger_request 2> /dev/null; then
echo "$0: firmware shouldn't have loaded" >&2
exit 1
fi
if diff -q "$FW" /dev/test_firmware >/dev/null ; then
echo "$0: firmware was not expected to match" >&2
exit 1


@ -64,9 +64,33 @@ trap "test_finish" EXIT
echo "ABCD0123" >"$FW"
NAME=$(basename "$FW")
DEVPATH="$DIR"/"nope-$NAME"/loading
# Test failure when doing nothing (timeout works).
echo 1 >/sys/class/firmware/timeout
echo -n "$NAME" >"$DIR"/trigger_request
echo -n 2 >/sys/class/firmware/timeout
echo -n "nope-$NAME" >"$DIR"/trigger_request 2>/dev/null &
# Give the kernel some time to load the loading file, must be less
# than the timeout above.
sleep 1
if [ ! -f $DEVPATH ]; then
echo "$0: fallback mechanism immediately cancelled"
echo ""
echo "The file never appeared: $DEVPATH"
echo ""
echo "This might be a distribution udev rule setup by your distribution"
echo "to immediately cancel all fallback requests, this must be"
echo "removed before running these tests. To confirm look for"
echo "a firmware rule like /lib/udev/rules.d/50-firmware.rules"
echo "and see if you have something like this:"
echo ""
echo "SUBSYSTEM==\"firmware\", ACTION==\"add\", ATTR{loading}=\"-1\""
echo ""
echo "If you do remove this file or comment out this line before"
echo "proceeding with these tests."
exit 1
fi
if diff -q "$FW" /dev/test_firmware >/dev/null ; then
echo "$0: firmware was not expected to match" >&2
exit 1