This is the 4.4.160 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlu9oZ4ACgkQONu9yGCS
aT5wmw/6As7cB5ufEFIVzCU3xJdf2yrD/+iaAY4fJUFWrgsqvImvwTeGyGm05AK2
/7VHaIW3ATmfLbgE4Qsq+eP/rfNPqkfDd7rVCIfrP3r51XhmP/e6/Mnfd3NN9K+O
FbRDc5U9kirzItAUsm1z9ntCuZDRfMdbazDAHB7eFlO2DgmV+u+o5KbzoeGM4mRk
IIDbdROW3sRmoPhubHBYZmGKFL+WNMxG/V1x+3iVnM1TNeGFgfR0NXaQ4s2lqdz8
tiJ0SNxcfEy/rAa1BgyuaKCcIXrD3OjaWOLYTB8Lr2PDn3WIyvpTw3sD2puCYWB9
zKLzKL/zPo4VK4wFAXZwbEhJuYrxRv4EsqyKKIdVzHeKtyMfHzMZg2uhnT1luLd8
yFiagE66H/Nn4SUznkD/bZNn1Zvyz7ME1AXq/L5go8HfuF2qVxaq/tczTJSCKsmH
M195RmR6JJ9ZF63mvyfopdyErcPXmBjnOgVb7TNXRa3yNyjZBFXvAUQQg/ZPkidl
81WsNVRyOr2LKpHmhceEcrXICqLmederLW/ZYc3+Ti8GnCf0AVL1bcnwAFygqvfp
Liq1YTWfqZl3/LHTCn1Jp3PduCgUAIREjP4g/YaHHJs+HfnZuvZcSa5maf1TieVk
IYbVtzkeKW8nTMGQnDazMl/LVmjV0bsA8tLakDW4ClUKRxX4nNI=
=99U3
-----END PGP SIGNATURE-----

Merge 4.4.160 into android-4.4

Changes in 4.4.160
    crypto: skcipher - Fix -Wstringop-truncation warnings
    tsl2550: fix lux1_input error in low light
    vmci: type promotion bug in qp_host_get_user_memory()
    x86/numa_emulation: Fix emulated-to-physical node mapping
    staging: rts5208: fix missing error check on call to rtsx_write_register
    uwb: hwa-rc: fix memory leak at probe
    power: vexpress: fix corruption in notifier registration
    Bluetooth: Add a new Realtek 8723DE ID 0bda:b009
    USB: serial: kobil_sct: fix modem-status error handling
    6lowpan: iphc: reset mac_header after decompress to fix panic
    md-cluster: clear another node's suspend_area after the copy is finished
    media: exynos4-is: Prevent NULL pointer dereference in __isp_video_try_fmt()
    powerpc/kdump: Handle crashkernel memory reservation failure
    media: fsl-viu: fix error handling in viu_of_probe()
    x86/tsc: Add missing header to tsc_msr.c
    x86/entry/64: Add two more instruction suffixes
    scsi: target/iscsi: Make iscsit_ta_authentication() respect the output buffer size
    scsi: klist: Make it safe to use klists in atomic context
    scsi: ibmvscsi: Improve strings handling
    usb: wusbcore: security: cast sizeof to int for comparison
    powerpc/powernv/ioda2: Reduce upper limit for DMA window size
    alarmtimer: Prevent overflow for relative nanosleep
    s390/extmem: fix gcc 8 stringop-overflow warning
    ALSA: snd-aoa: add of_node_put() in error path
    media: s3c-camif: ignore -ENOIOCTLCMD from v4l2_subdev_call for s_power
    media: soc_camera: ov772x: correct setting of banding filter
    media: omap3isp: zero-initialize the isp cam_xclk{a,b} initial data
    staging: android: ashmem: Fix mmap size validation
    drivers/tty: add error handling for pcmcia_loop_config
    media: tm6000: add error handling for dvb_register_adapter
    ALSA: hda: Add AZX_DCAPS_PM_RUNTIME for AMD Raven Ridge
    ath10k: protect ath10k_htt_rx_ring_free with rx_ring.lock
    rndis_wlan: potential buffer overflow in rndis_wlan_auth_indication()
    wlcore: Add missing PM call for wlcore_cmd_wait_for_event_or_timeout()
    ARM: mvebu: declare asm symbols as character arrays in pmsu.c
    HID: hid-ntrig: add error handling for sysfs_create_group
    scsi: bnx2i: add error handling for ioremap_nocache
    EDAC, i7core: Fix memleaks and use-after-free on probe and remove
    ASoC: dapm: Fix potential DAI widget pointer deref when linking DAIs
    module: exclude SHN_UNDEF symbols from kallsyms api
    nfsd: fix corrupted reply to badly ordered compound
    ARM: dts: dra7: fix DCAN node addresses
    floppy: Do not copy a kernel pointer to user memory in FDGETPRM ioctl
    serial: cpm_uart: return immediately from console poll
    spi: tegra20-slink: explicitly enable/disable clock
    spi: sh-msiof: Fix invalid SPI use during system suspend
    spi: sh-msiof: Fix handling of write value for SISTR register
    spi: rspi: Fix invalid SPI use during system suspend
    spi: rspi: Fix interrupted DMA transfers
    USB: fix error handling in usb_driver_claim_interface()
    USB: handle NULL config in usb_find_alt_setting()
    slub: make ->cpu_partial unsigned int
    media: uvcvideo: Support realtek's UVC 1.5 device
    USB: usbdevfs: sanitize flags more
    USB: usbdevfs: restore warning for nonsensical flags
    Revert "usb: cdc-wdm: Fix a sleep-in-atomic-context bug in service_outstanding_interrupt()"
    USB: remove LPM management from usb_driver_claim_interface()
    Input: elantech - enable middle button of touchpad on ThinkPad P72
    IB/srp: Avoid that sg_reset -d ${srp_device} triggers an infinite loop
    scsi: target: iscsi: Use bin2hex instead of a re-implementation
    serial: imx: restore handshaking irq for imx1
    arm64: KVM: Tighten guest core register access from userspace
    ext4: never move the system.data xattr out of the inode body
    thermal: of-thermal: disable passive polling when thermal zone is disabled
    net: hns: fix length and page_offset overflow when CONFIG_ARM64_64K_PAGES
    e1000: check on netif_running() before calling e1000_up()
    e1000: ensure to free old tx/rx rings in set_ringparam()
    hwmon: (ina2xx) fix sysfs shunt resistor read access
    hwmon: (adt7475) Make adt7475_read_word() return errors
    i2c: i801: Allow ACPI AML access I/O ports not reserved for SMBus
    arm64: cpufeature: Track 32bit EL0 support
    arm64: KVM: Sanitize PSTATE.M when being set from userspace
    media: v4l: event: Prevent freeing event subscriptions while accessed
    KVM: PPC: Book3S HV: Don't truncate HPTE index in xlate function
    mac80211: correct use of IEEE80211_VHT_CAP_RXSTBC_X
    mac80211_hwsim: correct use of IEEE80211_VHT_CAP_RXSTBC_X
    gpio: adp5588: Fix sleep-in-atomic-context bug
    mac80211: mesh: fix HWMP sequence numbering to follow standard
    cfg80211: nl80211_update_ft_ies() to validate NL80211_ATTR_IE
    RAID10 BUG_ON in raise_barrier when force is true and conf->barrier is 0
    i2c: uniphier: issue STOP only for last message or I2C_M_STOP
    i2c: uniphier-f: issue STOP only for last message or I2C_M_STOP
    net: cadence: Fix a sleep-in-atomic-context bug in macb_halt_tx()
    fs/cifs: don't translate SFM_SLASH (U+F026) to backslash
    cfg80211: fix a type issue in ieee80211_chandef_to_operating_class()
    mac80211: fix a race between restart and CSA flows
    mac80211: Fix station bandwidth setting after channel switch
    mac80211: shorten the IBSS debug messages
    tools/vm/slabinfo.c: fix sign-compare warning
    tools/vm/page-types.c: fix "defined but not used" warning
    mm: madvise(MADV_DODUMP): allow hugetlbfs pages
    usb: gadget: fotg210-udc: Fix memory leak of fotg210->ep[i]
    perf probe powerpc: Ignore SyS symbols irrespective of endianness
    RDMA/ucma: check fd type in ucma_migrate_id()
    USB: yurex: Check for truncation in yurex_read()
    drm/nouveau/TBDdevinit: don't fail when PMU/PRE_OS is missing from VBIOS
    fs/cifs: suppress a string overflow warning
    dm thin metadata: try to avoid ever aborting transactions
    arch/hexagon: fix kernel/dma.c build warning
    hexagon: modify ffs() and fls() to return int
    arm64: jump_label.h: use asm_volatile_goto macro instead of "asm goto"
    r8169: Clear RTL_FLAG_TASK_*_PENDING when clearing RTL_FLAG_TASK_ENABLED
    s390/qeth: don't dump past end of unknown HW header
    cifs: read overflow in is_valid_oplock_break()
    xen/manage: don't complain about an empty value in control/sysrq node
    xen: avoid crash in disable_hotplug_cpu
    xen: fix GCC warning and remove duplicate EVTCHN_ROW/EVTCHN_COL usage
    smb2: fix missing files in root share directory listing
    ALSA: hda/realtek - Cannot adjust speaker's volume on Dell XPS 27 7760
    crypto: mxs-dcp - Fix wait logic on chan threads
    proc: restrict kernel stack dumps to root
    ocfs2: fix locking for res->tracking and dlm->tracking_list
    dm thin metadata: fix __udivdi3 undefined on 32-bit Linux
    Linux 4.4.160

Change-Id: I54d72945f741d6b4442adcd7bc18cb5417accb0f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit a94efb1c27: 117 changed files with 814 additions and 285 deletions
@@ -32,7 +32,7 @@ Supported chips:
Datasheet: Publicly available at the Texas Instruments website
http://www.ti.com/

-Author: Lothar Felten <l-felten@ti.com>
+Author: Lothar Felten <lothar.felten@gmail.com>

Description
-----------
Makefile:

@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 159
+SUBLEVEL = 160
EXTRAVERSION =
NAME = Blurry Fish Butt
@@ -1549,7 +1549,7 @@
};
};

-dcan1: can@481cc000 {
+dcan1: can@4ae3c000 {
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan1";
reg = <0x4ae3c000 0x2000>;

@@ -1559,7 +1559,7 @@
status = "disabled";
};

-dcan2: can@481d0000 {
+dcan2: can@48480000 {
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan2";
reg = <0x48480000 0x2000>;
@@ -117,8 +117,8 @@ void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
}

-extern unsigned char mvebu_boot_wa_start;
-extern unsigned char mvebu_boot_wa_end;
+extern unsigned char mvebu_boot_wa_start[];
+extern unsigned char mvebu_boot_wa_end[];

/*
* This function sets up the boot address workaround needed for SMP

@@ -131,7 +131,7 @@ int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
phys_addr_t resume_addr_reg)
{
void __iomem *sram_virt_base;
-u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start;
+u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start;

mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
@@ -35,6 +35,7 @@
#define ARM64_ALT_PAN_NOT_UAO 10
#define ARM64_HAS_VIRT_HOST_EXTN 11
#define ARM64_WORKAROUND_CAVIUM_27456 12
+#define ARM64_HAS_32BIT_EL0 13
#define ARM64_UNMAP_KERNEL_AT_EL0 23

#define ARM64_NCAPS 24

@@ -185,6 +186,11 @@ static inline bool cpu_supports_mixed_endian_el0(void)
return id_aa64mmfr0_mixed_endian_el0(read_cpuid(SYS_ID_AA64MMFR0_EL1));
}

+static inline bool system_supports_32bit_el0(void)
+{
+return cpus_have_cap(ARM64_HAS_32BIT_EL0);
+}
+
static inline bool system_supports_mixed_endian_el0(void)
{
return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
@@ -28,7 +28,7 @@

static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
-asm goto("1: nop\n\t"
+asm_volatile_goto("1: nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"

@@ -42,7 +42,7 @@ l_yes:

static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
-asm goto("1: b %l[l_yes]\n\t"
+asm_volatile_goto("1: b %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
@@ -41,6 +41,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

+static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+return !(vcpu->arch.hcr_el2 & HCR_RW);
+}
+
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
@@ -115,6 +115,7 @@
#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
#define ID_AA64PFR0_EL1_64BIT_ONLY 0x1
#define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
+#define ID_AA64PFR0_EL0_32BIT_64BIT 0x2

/* id_aa64mmfr0 */
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
@@ -741,6 +741,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = unmap_kernel_at_el0,
},
#endif
+{
+.desc = "32-bit EL0 Support",
+.capability = ARM64_HAS_32BIT_EL0,
+.matches = has_cpuid_feature,
+.sys_reg = SYS_ID_AA64PFR0_EL1,
+.field_pos = ID_AA64PFR0_EL0_SHIFT,
+.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
+},
{},
};

@@ -48,6 +48,45 @@ static u64 core_reg_offset_from_id(u64 id)
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+u64 off = core_reg_offset_from_id(reg->id);
+int size;
+
+switch (off) {
+case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+     KVM_REG_ARM_CORE_REG(regs.regs[30]):
+case KVM_REG_ARM_CORE_REG(regs.sp):
+case KVM_REG_ARM_CORE_REG(regs.pc):
+case KVM_REG_ARM_CORE_REG(regs.pstate):
+case KVM_REG_ARM_CORE_REG(sp_el1):
+case KVM_REG_ARM_CORE_REG(elr_el1):
+case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+size = sizeof(__u64);
+break;
+
+case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+size = sizeof(__uint128_t);
+break;
+
+case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+size = sizeof(__u32);
+break;
+
+default:
+return -EINVAL;
+}
+
+if (KVM_REG_SIZE(reg->id) == size &&
+    IS_ALIGNED(off, size / sizeof(__u32)))
+return 0;
+
+return -EINVAL;
+}
+
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/*
@@ -67,6 +106,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;

+if (validate_core_offset(reg))
+return -EINVAL;
+
if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
return -EFAULT;

@@ -89,6 +131,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;

+if (validate_core_offset(reg))
+return -EINVAL;
+
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
return -EINVAL;

@@ -98,17 +143,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
}

if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
+u64 mode = (*(u64 *)valp) & COMPAT_PSR_MODE_MASK;
switch (mode) {
case COMPAT_PSR_MODE_USR:
+if (!system_supports_32bit_el0())
+return -EINVAL;
+break;
case COMPAT_PSR_MODE_FIQ:
case COMPAT_PSR_MODE_IRQ:
case COMPAT_PSR_MODE_SVC:
case COMPAT_PSR_MODE_ABT:
case COMPAT_PSR_MODE_UND:
+if (!vcpu_el1_is_32bit(vcpu))
+return -EINVAL;
+break;
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
+if (vcpu_el1_is_32bit(vcpu))
+return -EINVAL;
break;
default:
err = -EINVAL;
@@ -211,7 +211,7 @@ static inline long ffz(int x)
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static inline long fls(int x)
+static inline int fls(int x)
{
int r;

@@ -232,7 +232,7 @@ static inline long fls(int x)
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
-static inline long ffs(int x)
+static inline int ffs(int x)
{
int r;
@@ -68,7 +68,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
panic("Can't create %s() memory pool!", __func__);
else
gen_pool_add(coherent_pool,
-pfn_to_virt(max_low_pfn),
+(unsigned long)pfn_to_virt(max_low_pfn),
hexagon_coherent_pool_size, -1);
}
@@ -186,7 +186,12 @@ void __init reserve_crashkernel(void)
(unsigned long)(crashk_res.start >> 20),
(unsigned long)(memblock_phys_mem_size() >> 20));

-memblock_reserve(crashk_res.start, crash_size);
+if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
+    memblock_reserve(crashk_res.start, crash_size)) {
+pr_err("Failed to reserve memory for crashkernel!\n");
+crashk_res.start = crashk_res.end = 0;
+return;
+}
}

int overlaps_crashkernel(unsigned long start, unsigned long size)
@@ -314,7 +314,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
unsigned long pp, key;
unsigned long v, gr;
__be64 *hptep;
-int index;
+long int index;
int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

/* Get SLB entry */
@@ -2270,7 +2270,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
level_shift = entries_shift + 3;
level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);

-if ((level_shift - 3) * levels + page_shift >= 60)
+if ((level_shift - 3) * levels + page_shift >= 55)
return -EINVAL;

/* Allocate TCE table */
@@ -79,7 +79,7 @@ struct qin64 {
struct dcss_segment {
struct list_head list;
char dcss_name[8];
-char res_name[15];
+char res_name[16];
unsigned long start_addr;
unsigned long end;
atomic_t ref_count;

@@ -434,7 +434,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
memcpy(&seg->res_name, seg->dcss_name, 8);
EBCASC(seg->res_name, 8);
seg->res_name[8] = '\0';
-strncat(seg->res_name, " (DCSS)", 7);
+strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
seg->res->name = seg->res_name;
rc = seg->vm_segtype;
if (rc == SEG_TYPE_SC ||
@@ -90,7 +90,7 @@ ENDPROC(native_usergs_sysret64)
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
-bt $9, EFLAGS(%rsp) /* interrupts off? */
+btl $9, EFLAGS(%rsp) /* interrupts off? */
jnc 1f
TRACE_IRQS_ON_DEBUG
1:

@@ -620,7 +620,7 @@ retint_kernel:
#ifdef CONFIG_PREEMPT
/* Interrupts are off */
/* Check if we need preemption */
-bt $9, EFLAGS(%rsp) /* were interrupts off? */
+btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
jnz 1f
@@ -21,6 +21,7 @@
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/param.h>
+#include <asm/tsc.h>

/* CPU reference clock frequency: in KHz */
#define FREQ_83 83200
@@ -60,7 +60,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
eb->nid = nid;

if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
-emu_nid_to_phys[nid] = nid;
+emu_nid_to_phys[nid] = pb->nid;

pb->start += size;
if (pb->start >= pb->end) {
@@ -384,6 +384,7 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;

@@ -465,6 +466,7 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
sizeof(rblkcipher.geniv));
+rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;

@@ -536,6 +536,7 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
@@ -3459,6 +3459,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
(struct floppy_struct **)&outparam);
if (ret)
return ret;
+memcpy(&inparam.g, outparam,
+       offsetof(struct floppy_struct, name));
+outparam = &inparam.g;
break;
case FDMSGON:
UDP->flags |= FTD_MSG;
@@ -340,6 +340,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },

/* Additional Realtek 8723DE Bluetooth devices */
+{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },

/* Additional Realtek 8821AE Bluetooth devices */
@@ -63,7 +63,7 @@ struct dcp {
struct dcp_coherent_block *coh;

struct completion completion[DCP_MAX_CHANS];
-struct mutex mutex[DCP_MAX_CHANS];
+spinlock_t lock[DCP_MAX_CHANS];
struct task_struct *thread[DCP_MAX_CHANS];
struct crypto_queue queue[DCP_MAX_CHANS];
};

@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)

int ret;

-do {
-__set_current_state(TASK_INTERRUPTIBLE);
+while (!kthread_should_stop()) {
+set_current_state(TASK_INTERRUPTIBLE);

-mutex_lock(&sdcp->mutex[chan]);
+spin_lock(&sdcp->lock[chan]);
backlog = crypto_get_backlog(&sdcp->queue[chan]);
arq = crypto_dequeue_request(&sdcp->queue[chan]);
-mutex_unlock(&sdcp->mutex[chan]);
+spin_unlock(&sdcp->lock[chan]);
+
+if (!backlog && !arq) {
+schedule();
+continue;
+}
+
+set_current_state(TASK_RUNNING);

if (backlog)
backlog->complete(backlog, -EINPROGRESS);

@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
if (arq) {
ret = mxs_dcp_aes_block_crypt(arq);
arq->complete(arq, ret);
-continue;
}
-
-schedule();
-} while (!kthread_should_stop());
+}

return 0;
}
@@ -407,9 +411,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
rctx->ecb = ecb;
actx->chan = DCP_CHAN_CRYPTO;

-mutex_lock(&sdcp->mutex[actx->chan]);
+spin_lock(&sdcp->lock[actx->chan]);
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-mutex_unlock(&sdcp->mutex[actx->chan]);
+spin_unlock(&sdcp->lock[actx->chan]);

wake_up_process(sdcp->thread[actx->chan]);

@@ -645,13 +649,20 @@ static int dcp_chan_thread_sha(void *data)
struct ahash_request *req;
int ret, fini;

-do {
-__set_current_state(TASK_INTERRUPTIBLE);
+while (!kthread_should_stop()) {
+set_current_state(TASK_INTERRUPTIBLE);

-mutex_lock(&sdcp->mutex[chan]);
+spin_lock(&sdcp->lock[chan]);
backlog = crypto_get_backlog(&sdcp->queue[chan]);
arq = crypto_dequeue_request(&sdcp->queue[chan]);
-mutex_unlock(&sdcp->mutex[chan]);
+spin_unlock(&sdcp->lock[chan]);
+
+if (!backlog && !arq) {
+schedule();
+continue;
+}
+
+set_current_state(TASK_RUNNING);

if (backlog)
backlog->complete(backlog, -EINPROGRESS);

@@ -663,12 +674,8 @@ static int dcp_chan_thread_sha(void *data)
ret = dcp_sha_req_to_buf(arq);
fini = rctx->fini;
arq->complete(arq, ret);
-if (!fini)
-continue;
}
-
-schedule();
-} while (!kthread_should_stop());
+}

return 0;
}
@@ -726,9 +733,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
rctx->init = 1;
}

-mutex_lock(&sdcp->mutex[actx->chan]);
+spin_lock(&sdcp->lock[actx->chan]);
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-mutex_unlock(&sdcp->mutex[actx->chan]);
+spin_unlock(&sdcp->lock[actx->chan]);

wake_up_process(sdcp->thread[actx->chan]);
mutex_unlock(&actx->mutex);

@@ -984,7 +991,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sdcp);

for (i = 0; i < DCP_MAX_CHANS; i++) {
-mutex_init(&sdcp->mutex[i]);
+spin_lock_init(&sdcp->lock[i]);
init_completion(&sdcp->completion[i]);
crypto_init_queue(&sdcp->queue[i], 50);
}
@@ -1187,15 +1187,14 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)

rc = device_add(pvt->addrmatch_dev);
if (rc < 0)
-return rc;
+goto err_put_addrmatch;

if (!pvt->is_registered) {
pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
GFP_KERNEL);
if (!pvt->chancounts_dev) {
-put_device(pvt->addrmatch_dev);
-device_del(pvt->addrmatch_dev);
-return -ENOMEM;
+rc = -ENOMEM;
+goto err_del_addrmatch;
}

pvt->chancounts_dev->type = &all_channel_counts_type;

@@ -1209,9 +1208,18 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)

rc = device_add(pvt->chancounts_dev);
if (rc < 0)
-return rc;
+goto err_put_chancounts;
}
return 0;
+
+err_put_chancounts:
+put_device(pvt->chancounts_dev);
+err_del_addrmatch:
+device_del(pvt->addrmatch_dev);
+err_put_addrmatch:
+put_device(pvt->addrmatch_dev);
+
+return rc;
}

static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)

@@ -1221,11 +1229,11 @@ static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
edac_dbg(1, "\n");

if (!pvt->is_registered) {
-put_device(pvt->chancounts_dev);
device_del(pvt->chancounts_dev);
+put_device(pvt->chancounts_dev);
}
-put_device(pvt->addrmatch_dev);
device_del(pvt->addrmatch_dev);
+put_device(pvt->addrmatch_dev);
}

/****************************************************************************
@@ -41,6 +41,8 @@ struct adp5588_gpio {
uint8_t int_en[3];
uint8_t irq_mask[3];
uint8_t irq_stat[3];
+uint8_t int_input_en[3];
+uint8_t int_lvl_cached[3];
};

static int adp5588_gpio_read(struct i2c_client *client, u8 reg)

@@ -177,12 +179,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
int i;

-for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+if (dev->int_input_en[i]) {
+mutex_lock(&dev->lock);
+dev->dir[i] &= ~dev->int_input_en[i];
+dev->int_input_en[i] = 0;
+adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
+                   dev->dir[i]);
+mutex_unlock(&dev->lock);
+}
+
+if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
+dev->int_lvl_cached[i] = dev->int_lvl[i];
+adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
+                   dev->int_lvl[i]);
+}
+
if (dev->int_en[i] ^ dev->irq_mask[i]) {
dev->int_en[i] = dev->irq_mask[i];
adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
dev->int_en[i]);
}
+}

mutex_unlock(&dev->irq_lock);
}

@@ -225,9 +243,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
else
return -EINVAL;

-adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
-adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
-dev->int_lvl[bank]);
+dev->int_input_en[bank] |= bit;

return 0;
}
@@ -161,7 +161,8 @@ gm204_devinit_post(struct nvkm_devinit *base, bool post)
}

/* load and execute some other ucode image (bios therm?) */
-return pmu_load(init, 0x01, post, NULL, NULL);
+pmu_load(init, 0x01, post, NULL, NULL);
+return 0;
}

static const struct nvkm_devinit_func
@@ -955,6 +955,8 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)

ret = sysfs_create_group(&hdev->dev.kobj,
&ntrig_attribute_group);
+if (ret)
+hid_err(hdev, "cannot create sysfs group\n");

return 0;
err_free:
@@ -274,14 +274,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
return clamp_val(reg, 0, 1023) & (0xff << 2);
}

-static u16 adt7475_read_word(struct i2c_client *client, int reg)
+static int adt7475_read_word(struct i2c_client *client, int reg)
{
-u16 val;
+int val1, val2;

-val = i2c_smbus_read_byte_data(client, reg);
-val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
+val1 = i2c_smbus_read_byte_data(client, reg);
+if (val1 < 0)
+return val1;
+val2 = i2c_smbus_read_byte_data(client, reg + 1);
+if (val2 < 0)
+return val2;

-return val;
+return val1 | (val2 << 8);
}

static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
@@ -17,7 +17,7 @@
* Bi-directional Current/Power Monitor with I2C Interface
* Datasheet: http://www.ti.com/product/ina230
*
-* Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+* Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
* Thanks to Jan Volkering
*
* This program is free software; you can redistribute it and/or modify

@@ -328,6 +328,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
return 0;
}

+static ssize_t ina2xx_show_shunt(struct device *dev,
+                                 struct device_attribute *da,
+                                 char *buf)
+{
+struct ina2xx_data *data = dev_get_drvdata(dev);
+
+return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
+}
+
static ssize_t ina2xx_store_shunt(struct device *dev,
struct device_attribute *da,
const char *buf, size_t count)

@@ -402,7 +411,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,

/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
-ina2xx_show_value, ina2xx_store_shunt,
+ina2xx_show_shunt, ina2xx_store_shunt,
INA2XX_CALIBRATION);

/* update interval (ina226 only) */
@@ -1272,6 +1272,13 @@ static void i801_add_tco(struct i801_priv *priv)
}

#ifdef CONFIG_ACPI
+static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
+                                      acpi_physical_address address)
+{
+return address >= priv->smba &&
+       address <= pci_resource_end(priv->pci_dev, SMBBAR);
+}
+
static acpi_status
i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
u64 *value, void *handler_context, void *region_context)

@@ -1287,7 +1294,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
*/
mutex_lock(&priv->acpi_lock);

-if (!priv->acpi_reserved) {
+if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
priv->acpi_reserved = true;

dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
@@ -394,11 +394,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
return ret;

for (msg = msgs; msg < emsg; msg++) {
-/* If next message is read, skip the stop condition */
-bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-/* but, force it if I2C_M_STOP is set */
-if (msg->flags & I2C_M_STOP)
-stop = true;
+/* Emit STOP if it is the last message or I2C_M_STOP is set. */
+bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);

ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
if (ret)

@@ -247,11 +247,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
return ret;

for (msg = msgs; msg < emsg; msg++) {
-/* If next message is read, skip the stop condition */
-bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-/* but, force it if I2C_M_STOP is set */
-if (msg->flags & I2C_M_STOP)
-stop = true;
+/* Emit STOP if it is the last message or I2C_M_STOP is set. */
+bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);

ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
if (ret)
@@ -123,6 +123,8 @@ static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

+static const struct file_operations ucma_fops;
+
static inline struct ucma_context *_ucma_find_context(int id,
struct ucma_file *file)
{

@@ -1535,6 +1537,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
f = fdget(cmd.fd);
if (!f.file)
return -ENOENT;
+if (f.file->f_op != &ucma_fops) {
+ret = -EINVAL;
+goto file_put;
+}

/* Validate current fd and prevent destruction of id. */
ctx = ucma_get_ctx(f.file->private_data, cmd.id);
@@ -2594,7 +2594,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
-int i;
+int i, j;
u8 status;

shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

@@ -2608,8 +2608,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)

for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
-for (i = 0; i < target->req_ring_size; ++i) {
-struct srp_request *req = &ch->req_ring[i];
+for (j = 0; j < target->req_ring_size; ++j) {
+struct srp_request *req = &ch->req_ring[j];

srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
}
@@ -1180,6 +1180,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
static const char * const middle_button_pnp_ids[] = {
"LEN2131", /* ThinkPad P52 w/ NFC */
"LEN2132", /* ThinkPad P52 */
+"LEN2133", /* ThinkPad P72 w/ NFC */
+"LEN2134", /* ThinkPad P72 */
NULL
};

@@ -189,6 +189,12 @@ struct dm_pool_metadata {
unsigned long flags;
sector_t data_block_size;

+/*
+ * We reserve a section of the metadata for commit overhead.
+ * All reported space does *not* include this.
+ */
+dm_block_t metadata_reserve;
+
/*
* Set if a transaction has to be aborted but the attempt to roll back
* to the previous (good) transaction failed. The only pool metadata

@@ -827,6 +833,20 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
return dm_tm_commit(pmd->tm, sblock);
}

+static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
+{
+int r;
+dm_block_t total;
+dm_block_t max_blocks = 4096; /* 16M */
+
+r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
+if (r) {
+DMERR("could not get size of metadata device");
+pmd->metadata_reserve = max_blocks;
+} else
+pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
+}
+
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool format_device)
@@ -860,6 +880,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
return ERR_PTR(r);
}

+__set_metadata_reserve(pmd);
+
return pmd;
}

@@ -1763,6 +1785,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
down_read(&pmd->root_lock);
if (!pmd->fail_io)
r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+
+if (!r) {
+if (*result < pmd->metadata_reserve)
+*result = 0;
+else
+*result -= pmd->metadata_reserve;
+}
up_read(&pmd->root_lock);

return r;

@@ -1875,8 +1904,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
int r = -EINVAL;

down_write(&pmd->root_lock);
-if (!pmd->fail_io)
+if (!pmd->fail_io) {
r = __resize_space_map(pmd->metadata_sm, new_count);
+if (!r)
+__set_metadata_reserve(pmd);
+}
up_write(&pmd->root_lock);

return r;
@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
enum pool_mode {
PM_WRITE, /* metadata may be changed */
PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
+
+/*
+ * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
+ */
+PM_OUT_OF_METADATA_SPACE,
PM_READ_ONLY, /* metadata may not be changed */
+
PM_FAIL, /* all I/O fails */
};

@@ -1301,7 +1307,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static void requeue_bios(struct pool *pool);

-static void check_for_space(struct pool *pool)
+static bool is_read_only_pool_mode(enum pool_mode mode)
+{
+return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
+}
+
+static bool is_read_only(struct pool *pool)
+{
+return is_read_only_pool_mode(get_pool_mode(pool));
+}
+
+static void check_for_metadata_space(struct pool *pool)
{
int r;
+const char *ooms_reason = NULL;
dm_block_t nr_free;

+r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
+if (r)
+ooms_reason = "Could not get free metadata blocks";
+else if (!nr_free)
+ooms_reason = "No free metadata blocks";
+
+if (ooms_reason && !is_read_only(pool)) {
+DMERR("%s", ooms_reason);
+set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
+}
+}
+
+static void check_for_data_space(struct pool *pool)
+{
+int r;
+dm_block_t nr_free;
@@ -1327,14 +1361,16 @@ static int commit(struct pool *pool)
{
int r;

-if (get_pool_mode(pool) >= PM_READ_ONLY)
+if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
return -EINVAL;

r = dm_pool_commit_metadata(pool->pmd);
if (r)
metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
-else
-check_for_space(pool);
+else {
+check_for_metadata_space(pool);
+check_for_data_space(pool);
+}

return r;
}

@@ -1400,6 +1436,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
return r;
}

+r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
+if (r) {
+metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
+return r;
+}
+
+if (!free_blocks) {
+/* Let's commit before we use up the metadata reserve. */
+r = commit(pool);
+if (r)
+return r;
+}
+
return 0;
}

@@ -1431,6 +1480,7 @@ static int should_error_unserviceable_bio(struct pool *pool)
case PM_OUT_OF_DATA_SPACE:
return pool->pf.error_if_no_space ? -ENOSPC : 0;

+case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
case PM_FAIL:
return -EIO;
@@ -2401,8 +2451,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
error_retry_list(pool);
break;

+case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
-if (old_mode != new_mode)
+if (!is_read_only_pool_mode(old_mode))
notify_of_pool_mode_change(pool, "read-only");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_read_only;

@@ -3333,6 +3384,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
DMINFO("%s: growing the metadata device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
sb_metadata_dev_size, metadata_dev_size);
+
+if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
+set_pool_mode(pool, PM_WRITE);
+
r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
if (r) {
metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);

@@ -3636,7 +3691,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;

-if (get_pool_mode(pool) >= PM_READ_ONLY) {
+if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
dm_device_name(pool->pool_md));
return -EOPNOTSUPP;

@@ -3710,6 +3765,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
dm_block_t nr_blocks_data;
dm_block_t nr_blocks_metadata;
dm_block_t held_root;
+enum pool_mode mode;
char buf[BDEVNAME_SIZE];
char buf2[BDEVNAME_SIZE];
struct pool_c *pt = ti->private;

@@ -3780,9 +3836,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("- ");

-if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+mode = get_pool_mode(pool);
+if (mode == PM_OUT_OF_DATA_SPACE)
DMEMIT("out_of_data_space ");
-else if (pool->pf.mode == PM_READ_ONLY)
+else if (is_read_only_pool_mode(mode))
DMEMIT("ro ");
else
DMEMIT("rw ");
@@ -239,15 +239,6 @@ static void recover_bitmaps(struct md_thread *thread)
while (cinfo->recovery_map) {
slot = fls64((u64)cinfo->recovery_map) - 1;

-/* Clear suspend_area associated with the bitmap */
-spin_lock_irq(&cinfo->suspend_lock);
-list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
-if (slot == s->slot) {
-list_del(&s->list);
-kfree(s);
-}
-spin_unlock_irq(&cinfo->suspend_lock);
-
snprintf(str, 64, "bitmap%04d", slot);
bm_lockres = lockres_init(mddev, str, NULL, 1);
if (!bm_lockres) {

@@ -266,6 +257,16 @@ static void recover_bitmaps(struct md_thread *thread)
pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
goto dlm_unlock;
}
+
+/* Clear suspend_area associated with the bitmap */
+spin_lock_irq(&cinfo->suspend_lock);
+list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+if (slot == s->slot) {
+list_del(&s->list);
+kfree(s);
+}
+spin_unlock_irq(&cinfo->suspend_lock);
+
if (hi > 0) {
/* TODO:Wait for current resync to get over */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -4336,11 +4336,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
allow_barrier(conf);
}

+raise_barrier(conf, 0);
read_more:
/* Now schedule reads for blocks from sector_nr to last */
r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
r10_bio->state = 0;
-raise_barrier(conf, sectors_done != 0);
+raise_barrier(conf, 1);
atomic_set(&r10_bio->remaining, 0);
r10_bio->mddev = mddev;
r10_bio->sector = sector_nr;

@@ -4445,6 +4446,8 @@ bio_full:
if (sector_nr <= last)
goto read_more;

+lower_barrier(conf);
+
/* Now that we have done the whole section we can
* update reshape_progress
*/
@@ -834,7 +834,7 @@ static int ov772x_set_params(struct ov772x_priv *priv,
* set COM8
*/
if (priv->band_filter) {
-ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, 1);
+ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
if (!ret)
ret = ov772x_mask_set(client, BDBASE,
0xff, 256 - priv->band_filter);
@@ -391,12 +391,17 @@ static void __isp_video_try_fmt(struct fimc_isp *isp,
struct v4l2_pix_format_mplane *pixm,
const struct fimc_fmt **fmt)
{
-*fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+const struct fimc_fmt *__fmt;
+
+__fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+
+if (fmt)
+*fmt = __fmt;

pixm->colorspace = V4L2_COLORSPACE_SRGB;
pixm->field = V4L2_FIELD_NONE;
-pixm->num_planes = (*fmt)->memplanes;
-pixm->pixelformat = (*fmt)->fourcc;
+pixm->num_planes = __fmt->memplanes;
+pixm->pixelformat = __fmt->fourcc;
/*
* TODO: double check with the docmentation these width/height
* constraints are correct.
@@ -1417,7 +1417,7 @@ static int viu_of_probe(struct platform_device *op)
sizeof(struct viu_reg), DRV_NAME)) {
dev_err(&op->dev, "Error while requesting mem region\n");
ret = -EBUSY;
-goto err;
+goto err_irq;
}

/* remap registers */

@@ -1425,7 +1425,7 @@ static int viu_of_probe(struct platform_device *op)
if (!viu_regs) {
dev_err(&op->dev, "Can't map register set\n");
ret = -ENOMEM;
-goto err;
+goto err_irq;
}

/* Prepare our private structure */

@@ -1433,7 +1433,7 @@ static int viu_of_probe(struct platform_device *op)
if (!viu_dev) {
dev_err(&op->dev, "Can't allocate private structure\n");
ret = -ENOMEM;
-goto err;
+goto err_irq;
}

viu_dev->vr = viu_regs;

@@ -1449,16 +1449,21 @@ static int viu_of_probe(struct platform_device *op)
ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
if (ret < 0) {
dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
-goto err;
+goto err_irq;
}

ad = i2c_get_adapter(0);
+if (!ad) {
+ret = -EFAULT;
+dev_err(&op->dev, "couldn't get i2c adapter\n");
+goto err_v4l2;
+}

v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
if (viu_dev->hdl.error) {
ret = viu_dev->hdl.error;
dev_err(&op->dev, "couldn't register control\n");
-goto err_vdev;
+goto err_i2c;
}
/* This control handler will inherit the control(s) from the
sub-device(s). */
@@ -1476,7 +1481,7 @@ static int viu_of_probe(struct platform_device *op)
vdev = video_device_alloc();
if (vdev == NULL) {
ret = -ENOMEM;
-goto err_vdev;
+goto err_hdl;
}

*vdev = viu_template;

@@ -1497,7 +1502,7 @@ static int viu_of_probe(struct platform_device *op)
ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
video_device_release(viu_dev->vdev);
-goto err_vdev;
+goto err_unlock;
}

/* enable VIU clock */

@@ -1505,12 +1510,12 @@ static int viu_of_probe(struct platform_device *op)
if (IS_ERR(clk)) {
dev_err(&op->dev, "failed to lookup the clock!\n");
ret = PTR_ERR(clk);
-goto err_clk;
+goto err_vdev;
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(&op->dev, "failed to enable the clock!\n");
-goto err_clk;
+goto err_vdev;
}
viu_dev->clk = clk;
@@ -1521,7 +1526,7 @@ static int viu_of_probe(struct platform_device *op)
if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
dev_err(&op->dev, "Request VIU IRQ failed.\n");
ret = -ENODEV;
-goto err_irq;
+goto err_clk;
}

mutex_unlock(&viu_dev->lock);

@@ -1529,16 +1534,19 @@ static int viu_of_probe(struct platform_device *op)
dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
return ret;

-err_irq:
-clk_disable_unprepare(viu_dev->clk);
err_clk:
-video_unregister_device(viu_dev->vdev);
+clk_disable_unprepare(viu_dev->clk);
err_vdev:
-v4l2_ctrl_handler_free(&viu_dev->hdl);
+video_unregister_device(viu_dev->vdev);
+err_unlock:
mutex_unlock(&viu_dev->lock);
+err_hdl:
+v4l2_ctrl_handler_free(&viu_dev->hdl);
+err_i2c:
i2c_put_adapter(ad);
+err_v4l2:
v4l2_device_unregister(&viu_dev->v4l2_dev);
-err:
+err_irq:
irq_dispose_mapping(viu_irq);
return ret;
}
@@ -303,7 +303,7 @@ static struct clk *isp_xclk_src_get(struct of_phandle_args *clkspec, void *data)
static int isp_xclk_init(struct isp_device *isp)
{
struct device_node *np = isp->dev->of_node;
-struct clk_init_data init;
+struct clk_init_data init = { 0 };
unsigned int i;

for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
@@ -117,6 +117,8 @@ static int sensor_set_power(struct camif_dev *camif, int on)

if (camif->sensor.power_count == !on)
err = v4l2_subdev_call(sensor->sd, core, s_power, on);
+if (err == -ENOIOCTLCMD)
+err = 0;
if (!err)
sensor->power_count += on ? 1 : -1;
@@ -275,6 +275,11 @@ static int register_dvb(struct tm6000_core *dev)

ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
THIS_MODULE, &dev->udev->dev, adapter_nr);
+if (ret < 0) {
+pr_err("tm6000: couldn't register the adapter!\n");
+goto err;
+}
+
dvb->adapter.priv = dev;

if (dvb->frontend) {
@@ -163,14 +163,27 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
}
}

+static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
+{
+/*
+ * Return the size of the video probe and commit controls, which depends
+ * on the protocol version.
+ */
+if (stream->dev->uvc_version < 0x0110)
+return 26;
+else if (stream->dev->uvc_version < 0x0150)
+return 34;
+else
+return 48;
+}
+
static int uvc_get_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl, int probe, __u8 query)
{
+__u16 size = uvc_video_ctrl_size(stream);
__u8 *data;
-__u16 size;
int ret;

-size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) &&
query == UVC_GET_DEF)
return -EIO;
@@ -225,7 +238,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);

-if (size == 34) {
+if (size >= 34) {
ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
ctrl->bmFramingInfo = data[30];
ctrl->bPreferedVersion = data[31];

@@ -254,11 +267,10 @@ out:
static int uvc_set_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl, int probe)
{
+__u16 size = uvc_video_ctrl_size(stream);
__u8 *data;
-__u16 size;
int ret;

-size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
data = kzalloc(size, GFP_KERNEL);
if (data == NULL)
return -ENOMEM;

@@ -275,7 +287,7 @@ static int uvc_set_video_ctrl(struct uvc_streaming *stream,
put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]);
put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]);

-if (size == 34) {
+if (size >= 34) {
put_unaligned_le32(ctrl->dwClockFrequency, &data[26]);
data[30] = ctrl->bmFramingInfo;
data[31] = ctrl->bPreferedVersion;
@@ -119,14 +119,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
if (sev == NULL)
return;

-/*
- * If the event has been added to the fh->subscribed list, but its
- * add op has not completed yet elems will be 0, treat this as
- * not being subscribed.
- */
-if (!sev->elems)
-return;
-
/* Increase event sequence number on fh. */
fh->sequence++;

@@ -212,6 +204,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
struct v4l2_subscribed_event *sev, *found_ev;
unsigned long flags;
unsigned i;
+int ret = 0;

if (sub->type == V4L2_EVENT_ALL)
return -EINVAL;
@@ -229,31 +222,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
sev->flags = sub->flags;
sev->fh = fh;
sev->ops = ops;
-sev->elems = elems;

+mutex_lock(&fh->subscribe_lock);
+
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
-if (!found_ev)
-list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

if (found_ev) {
+/* Already listening */
kfree(sev);
-return 0; /* Already listening */
+goto out_unlock;
}

if (sev->ops && sev->ops->add) {
-int ret = sev->ops->add(sev, elems);
+ret = sev->ops->add(sev, elems);
if (ret) {
-sev->ops = NULL;
-v4l2_event_unsubscribe(fh, sub);
-return ret;
+kfree(sev);
+goto out_unlock;
}
}

+/* Mark as ready for use */
+sev->elems = elems;
+
+spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+list_add(&sev->list, &fh->subscribed);
+spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
return 0;
+
+out_unlock:
+mutex_unlock(&fh->subscribe_lock);
+
+return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
@@ -292,6 +290,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
return 0;
}

+mutex_lock(&fh->subscribe_lock);
+
spin_lock_irqsave(&fh->vdev->fh_lock, flags);

sev = v4l2_event_subscribed(fh, sub->type, sub->id);

@@ -310,6 +310,7 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
sev->ops->del(sev);

kfree(sev);
+mutex_unlock(&fh->subscribe_lock);

return 0;
}
@@ -49,6 +49,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
INIT_LIST_HEAD(&fh->available);
INIT_LIST_HEAD(&fh->subscribed);
fh->sequence = -1;
+mutex_init(&fh->subscribe_lock);
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);

@@ -93,6 +94,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
if (fh->vdev == NULL)
return;
v4l2_event_unsubscribe_all(fh);
+mutex_destroy(&fh->subscribe_lock);
fh->vdev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
@@ -177,7 +177,7 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
} else
lux = 0;
else
-return -EAGAIN;
+return 0;

/* LUX range check */
return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
@@ -755,7 +755,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
retval = get_user_pages_fast((uintptr_t) produce_uva,
produce_q->kernel_if->num_pages, 1,
produce_q->kernel_if->u.h.header_page);
-if (retval < produce_q->kernel_if->num_pages) {
+if (retval < (int)produce_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
retval);
qp_release_pages(produce_q->kernel_if->u.h.header_page,

@@ -767,7 +767,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
retval = get_user_pages_fast((uintptr_t) consume_uva,
consume_q->kernel_if->num_pages, 1,
consume_q->kernel_if->u.h.header_page);
-if (retval < consume_q->kernel_if->num_pages) {
+if (retval < (int)consume_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
retval);
qp_release_pages(consume_q->kernel_if->u.h.header_page,
@@ -523,7 +523,7 @@ static int macb_halt_tx(struct macb *bp)
 if (!(status & MACB_BIT(TGO)))
 return 0;

-usleep_range(10, 250);
+udelay(250);
 } while (time_before(halt_time, timeout));

 return -ETIMEDOUT;

@@ -171,10 +171,10 @@ struct hnae_desc_cb {

 /* priv data for the desc, e.g. skb when use with ip stack*/
 void *priv;
-u16 page_offset;
-u16 reuse_flag;
+u32 page_offset;
+u32 length; /* length of the buffer */

-u16 length; /* length of the buffer */
+u16 reuse_flag;

 /* desc type, used by the ring user to mark the type of the priv data */
 u16 type;

@@ -645,14 +645,14 @@ static int e1000_set_ringparam(struct net_device *netdev,
 adapter->tx_ring = tx_old;
 e1000_free_all_rx_resources(adapter);
 e1000_free_all_tx_resources(adapter);
-kfree(tx_old);
-kfree(rx_old);
 adapter->rx_ring = rxdr;
 adapter->tx_ring = txdr;
 err = e1000_up(adapter);
 if (err)
 goto err_setup;
 }
+kfree(tx_old);
+kfree(rx_old);

 clear_bit(__E1000_RESETTING, &adapter->flags);
 return 0;

@@ -665,7 +665,8 @@ err_setup_rx:
 err_alloc_rx:
 kfree(txdr);
 err_alloc_tx:
-e1000_up(adapter);
+if (netif_running(adapter->netdev))
+e1000_up(adapter);
 err_setup:
 clear_bit(__E1000_RESETTING, &adapter->flags);
 return err;

@@ -759,7 +759,7 @@ struct rtl8169_tc_offsets {
 };

 enum rtl_flag {
-RTL_FLAG_TASK_ENABLED,
+RTL_FLAG_TASK_ENABLED = 0,
 RTL_FLAG_TASK_SLOW_PENDING,
 RTL_FLAG_TASK_RESET_PENDING,
 RTL_FLAG_TASK_PHY_PENDING,

@@ -7618,7 +7618,8 @@ static int rtl8169_close(struct net_device *dev)
 rtl8169_update_counters(dev);

 rtl_lock_work(tp);
-clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+/* Clear all task flags */
+bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);

 rtl8169_down(dev);
 rtl_unlock_work(tp);

@@ -7795,7 +7796,9 @@ static void rtl8169_net_suspend(struct net_device *dev)

 rtl_lock_work(tp);
 napi_disable(&tp->napi);
-clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+/* Clear all task flags */
+bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
 rtl_unlock_work(tp);

 rtl_pll_power_down(tp);

@@ -212,11 +212,12 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
 spin_lock_bh(&htt->rx_ring.lock);
 ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
 htt->rx_ring.fill_cnt));
-spin_unlock_bh(&htt->rx_ring.lock);

 if (ret)
 ath10k_htt_rx_ring_free(htt);

+spin_unlock_bh(&htt->rx_ring.lock);
+
 return ret;
 }

@@ -230,7 +231,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
 skb_queue_purge(&htt->rx_compl_q);
 skb_queue_purge(&htt->rx_in_ord_compl_q);

+spin_lock_bh(&htt->rx_ring.lock);
 ath10k_htt_rx_ring_free(htt);
+spin_unlock_bh(&htt->rx_ring.lock);

 dma_free_coherent(htt->ar->dev,
 (htt->rx_ring.size *

@@ -2453,9 +2453,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 IEEE80211_VHT_CAP_SHORT_GI_80 |
 IEEE80211_VHT_CAP_SHORT_GI_160 |
 IEEE80211_VHT_CAP_TXSTBC |
-IEEE80211_VHT_CAP_RXSTBC_1 |
-IEEE80211_VHT_CAP_RXSTBC_2 |
-IEEE80211_VHT_CAP_RXSTBC_3 |
 IEEE80211_VHT_CAP_RXSTBC_4 |
 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
 sband->vht_cap.vht_mcs.rx_mcs_map =

@@ -2919,6 +2919,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev,

 while (buflen >= sizeof(*auth_req)) {
 auth_req = (void *)buf;
+if (buflen < le32_to_cpu(auth_req->length))
+return;
 type = "unknown";
 flags = le32_to_cpu(auth_req->flags);
 pairwise_error = false;

@@ -35,6 +35,7 @@
 #include "wl12xx_80211.h"
 #include "cmd.h"
 #include "event.h"
+#include "ps.h"
 #include "tx.h"
 #include "hw_ops.h"

@@ -191,6 +192,10 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,

 timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);

+ret = wl1271_ps_elp_wakeup(wl);
+if (ret < 0)
+return ret;
+
 do {
 if (time_after(jiffies, timeout_time)) {
 wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",

@@ -222,6 +227,7 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
 } while (!event);

 out:
+wl1271_ps_elp_sleep(wl);
 kfree(events_vector);
 return ret;
 }

@@ -35,6 +35,7 @@ static void vexpress_reset_do(struct device *dev, const char *what)
 }

 static struct device *vexpress_power_off_device;
+static atomic_t vexpress_restart_nb_refcnt = ATOMIC_INIT(0);

 static void vexpress_power_off(void)
 {

@@ -99,10 +100,13 @@ static int _vexpress_register_restart_handler(struct device *dev)
 int err;

 vexpress_restart_device = dev;
-err = register_restart_handler(&vexpress_restart_nb);
-if (err) {
-dev_err(dev, "cannot register restart handler (err=%d)\n", err);
-return err;
+if (atomic_inc_return(&vexpress_restart_nb_refcnt) == 1) {
+err = register_restart_handler(&vexpress_restart_nb);
+if (err) {
+dev_err(dev, "cannot register restart handler (err=%d)\n", err);
+atomic_dec(&vexpress_restart_nb_refcnt);
+return err;
+}
 }
 device_create_file(dev, &dev_attr_active);

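The vexpress hunks above make registration of the shared restart handler reference-counted, so only the first user registers it and a failure rolls the count back. A small self-contained C11 sketch of the same get/put pattern, with hypothetical stand-in functions (not the kernel API):

#include <stdatomic.h>

static atomic_int restart_nb_refcnt;

/* Hypothetical stand-ins for register/unregister_restart_handler(). */
static int register_handler(void)    { return 0; }
static void unregister_handler(void) { }

/* Only the first caller actually registers the single shared handler; later
 * callers just take a reference.  On failure the count is rolled back so a
 * later attempt can try again. */
static int get_restart_handler(void)
{
	if (atomic_fetch_add(&restart_nb_refcnt, 1) + 1 == 1) {
		int err = register_handler();

		if (err) {
			atomic_fetch_sub(&restart_nb_refcnt, 1);
			return err;
		}
	}
	return 0;
}

static void put_restart_handler(void)
{
	if (atomic_fetch_sub(&restart_nb_refcnt, 1) == 1)
		unregister_handler();
}
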
@@ -523,7 +523,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
 default:
 dev_kfree_skb_any(skb);
 QETH_CARD_TEXT(card, 3, "inbunkno");
-QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
 continue;
 }
 work_done++;

@@ -1902,7 +1902,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
 default:
 dev_kfree_skb_any(skb);
 QETH_CARD_TEXT(card, 3, "inbunkno");
-QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
 continue;
 }
 work_done++;

@@ -2742,6 +2742,8 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
 BNX2X_DOORBELL_PCI_BAR);
 reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+if (!ep->qp.ctx_base)
+return -ENOMEM;
 goto arm_cq;
 }

@@ -93,7 +93,7 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
 static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
 static int fast_fail = 1;
 static int client_reserve = 1;
-static char partition_name[97] = "UNKNOWN";
+static char partition_name[96] = "UNKNOWN";
 static unsigned int partition_number = -1;

 static struct scsi_transport_template *ibmvscsi_transport_template;

@@ -261,7 +261,7 @@ static void gather_partition_info(void)

 ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
 if (ppartition_name)
-strncpy(partition_name, ppartition_name,
+strlcpy(partition_name, ppartition_name,
 sizeof(partition_name));
 p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
 if (p_number_ptr)

@@ -587,11 +587,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,

 ret = wait_event_interruptible_timeout(rspi->wait,
 rspi->dma_callbacked, HZ);
-if (ret > 0 && rspi->dma_callbacked)
+if (ret > 0 && rspi->dma_callbacked) {
 ret = 0;
-else if (!ret) {
-dev_err(&rspi->master->dev, "DMA timeout\n");
-ret = -ETIMEDOUT;
+} else {
+if (!ret) {
+dev_err(&rspi->master->dev, "DMA timeout\n");
+ret = -ETIMEDOUT;
+}
 if (tx)
 dmaengine_terminate_all(rspi->master->dma_tx);
 if (rx)

@@ -1303,12 +1305,36 @@ static const struct platform_device_id spi_driver_ids[] = {
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);

+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+struct platform_device *pdev = to_platform_device(dev);
+struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+struct platform_device *pdev = to_platform_device(dev);
+struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS &rspi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver rspi_driver = {
 .probe = rspi_probe,
 .remove = rspi_remove,
 .id_table = spi_driver_ids,
 .driver = {
 .name = "renesas_spi",
+.pm = DEV_PM_OPS,
 .of_match_table = of_match_ptr(rspi_of_match),
 },
 };

@@ -374,7 +374,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,

 static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
 {
-sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+sh_msiof_write(p, STR,
+sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
 }

 static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,

@@ -1275,12 +1276,37 @@
 };
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);

+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+struct platform_device *pdev = to_platform_device(dev);
+struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+return spi_master_suspend(p->master);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+struct platform_device *pdev = to_platform_device(dev);
+struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+return spi_master_resume(p->master);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+sh_msiof_spi_resume);
+#define DEV_PM_OPS &sh_msiof_spi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver sh_msiof_spi_drv = {
 .probe = sh_msiof_spi_probe,
 .remove = sh_msiof_spi_remove,
 .id_table = spi_driver_ids,
 .driver = {
 .name = "spi_sh_msiof",
+.pm = DEV_PM_OPS,
 .of_match_table = of_match_ptr(sh_msiof_match),
 },
 };

@@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev)
 goto exit_free_master;
 }

+/* disabled clock may cause interrupt storm upon request */
+tspi->clk = devm_clk_get(&pdev->dev, NULL);
+if (IS_ERR(tspi->clk)) {
+ret = PTR_ERR(tspi->clk);
+dev_err(&pdev->dev, "Can not get clock %d\n", ret);
+goto exit_free_master;
+}
+ret = clk_prepare(tspi->clk);
+if (ret < 0) {
+dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
+goto exit_free_master;
+}
+ret = clk_enable(tspi->clk);
+if (ret < 0) {
+dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+goto exit_free_master;
+}
+
 spi_irq = platform_get_irq(pdev, 0);
 tspi->irq = spi_irq;
 ret = request_threaded_irq(tspi->irq, tegra_slink_isr,

@@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
 if (ret < 0) {
 dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
 tspi->irq);
-goto exit_free_master;
-}
-
-tspi->clk = devm_clk_get(&pdev->dev, NULL);
-if (IS_ERR(tspi->clk)) {
-dev_err(&pdev->dev, "can not get clock\n");
-ret = PTR_ERR(tspi->clk);
-goto exit_free_irq;
+goto exit_clk_disable;
 }

 tspi->rst = devm_reset_control_get(&pdev->dev, "spi");

@@ -1138,6 +1149,8 @@ exit_rx_dma_free:
 tegra_slink_deinit_dma_param(tspi, true);
 exit_free_irq:
 free_irq(spi_irq, tspi);
+exit_clk_disable:
+clk_disable(tspi->clk);
 exit_free_master:
 spi_master_put(master);
 return ret;

@@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev)

 free_irq(tspi->irq, tspi);

+clk_disable(tspi->clk);
+
 if (tspi->tx_dma_chan)
 tegra_slink_deinit_dma_param(tspi, false);

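The spi-tegra20-slink hunks above move clock setup ahead of the IRQ request (a disabled clock could otherwise trigger an interrupt storm) and add a matching unwind label. A generic C sketch of that ordering with stand-in helpers (assumed names, not the driver's real functions):

/* Hypothetical stand-ins for the clock and IRQ helpers. */
static int enable_clock(void)     { return 0; }
static void disable_clock(void)   { }
static int request_irq_line(void) { return 0; }
static void free_irq_line(void)   { }

/* Bring resources up in dependency order (clock before IRQ, since the
 * handler may touch the hardware immediately) and unwind in reverse
 * order on failure. */
static int probe(void)
{
	int ret;

	ret = enable_clock();
	if (ret)
		return ret;

	ret = request_irq_line();
	if (ret)
		goto err_clk_disable;

	return 0;

err_clk_disable:
	disable_clock();
	return ret;
}

static void remove(void)
{
	free_irq_line();
	disable_clock();
}
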
@@ -370,6 +370,12 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 goto out;
 }

+/* requested mapping size larger than object size */
+if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
+ret = -EINVAL;
+goto out;
+}
+
 /* requested protection bits must match our allowed protection mask */
 if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
 calc_vm_prot_bits(PROT_MASK))) {

@@ -5031,7 +5031,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 goto SD_Execute_Write_Cmd_Failed;
 }

-rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
 if (retval != STATUS_SUCCESS) {
 rtsx_trace(chip);
 goto SD_Execute_Write_Cmd_Failed;

@@ -26,15 +26,6 @@
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"

-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
-{
-int i;
-
-for (i = 0; i < src_len; i++) {
-sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
-}
-}
-
 static void chap_gen_challenge(
 struct iscsi_conn *conn,
 int caller,

@@ -47,7 +38,7 @@ static void chap_gen_challenge(
 memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);

 get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
-chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+bin2hex(challenge_asciihex, chap->challenge,
 CHAP_CHALLENGE_LENGTH);
 /*
 * Set CHAP_C, and copy the generated challenge into c_str.

@@ -287,7 +278,7 @@ static int chap_server_compute_md5(
 }
 crypto_free_hash(tfm);

-chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
 pr_debug("[server] MD5 Server Digest: %s\n", response);

 if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {

@@ -431,7 +422,7 @@ static int chap_server_compute_md5(
 /*
 * Convert response from binary hex to ascii hext.
 */
-chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+bin2hex(response, digest, MD5_SIGNATURE_SIZE);
 *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
 response);
 *nr_out_len += 1;

@@ -637,8 +637,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
 none = strstr(buf1, NONE);
 if (none)
 goto out;
-strncat(buf1, ",", strlen(","));
-strncat(buf1, NONE, strlen(NONE));
+strlcat(buf1, "," NONE, sizeof(buf1));
 if (iscsi_update_param_value(param, buf1) < 0)
 return -EINVAL;
 }

@@ -284,10 +284,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz,

 mutex_lock(&tz->lock);

-if (mode == THERMAL_DEVICE_ENABLED)
+if (mode == THERMAL_DEVICE_ENABLED) {
 tz->polling_delay = data->polling_delay;
-else
+tz->passive_delay = data->passive_delay;
+} else {
 tz->polling_delay = 0;
+tz->passive_delay = 0;
+}

 mutex_unlock(&tz->lock);

@@ -629,8 +629,10 @@ static int serial_config(struct pcmcia_device * link)
 (link->has_func_id) &&
 (link->socket->pcmcia_pfc == 0) &&
 ((link->func_id == CISTPL_FUNCID_MULTI) ||
-(link->func_id == CISTPL_FUNCID_SERIAL)))
-pcmcia_loop_config(link, serial_check_for_multi, info);
+(link->func_id == CISTPL_FUNCID_SERIAL))) {
+if (pcmcia_loop_config(link, serial_check_for_multi, info))
+goto failed;
+}

 /*
 * Apply any multi-port quirk.

@@ -1068,8 +1068,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
 /* Get the address of the host memory buffer.
 */
 bdp = pinfo->rx_cur;
-while (bdp->cbd_sc & BD_SC_EMPTY)
-;
+if (bdp->cbd_sc & BD_SC_EMPTY)
+return NO_POLL_CHAR;

 /* If the buffer address is in the CPM DPRAM, don't
 * convert it.

@@ -1104,7 +1104,11 @@ static int cpm_get_poll_char(struct uart_port *port)
 poll_chars = 0;
 }
 if (poll_chars <= 0) {
-poll_chars = poll_wait_key(poll_buf, pinfo);
+int ret = poll_wait_key(poll_buf, pinfo);
+
+if (ret == NO_POLL_CHAR)
+return ret;
+poll_chars = ret;
 pollp = poll_buf;
 }
 poll_chars--;

@@ -1997,6 +1997,14 @@ static int serial_imx_probe(struct platform_device *pdev)
 dev_name(&pdev->dev), sport);
 if (ret)
 return ret;
+
+ret = devm_request_irq(&pdev->dev, rtsirq, imx_rtsint, 0,
+dev_name(&pdev->dev), sport);
+if (ret) {
+dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+ret);
+return ret;
+}
 } else {
 ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
 dev_name(&pdev->dev), sport);

@@ -453,7 +453,7 @@ static int clear_wdm_read_flag(struct wdm_device *desc)

 set_bit(WDM_RESPONDING, &desc->flags);
 spin_unlock_irq(&desc->iuspin);
-rv = usb_submit_urb(desc->response, GFP_ATOMIC);
+rv = usb_submit_urb(desc->response, GFP_KERNEL);
 spin_lock_irq(&desc->iuspin);
 if (rv) {
 dev_err(&desc->intf->dev,

@@ -1289,10 +1289,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 struct async *as = NULL;
 struct usb_ctrlrequest *dr = NULL;
 unsigned int u, totlen, isofrmlen;
-int i, ret, is_in, num_sgs = 0, ifnum = -1;
+int i, ret, num_sgs = 0, ifnum = -1;
 int number_of_packets = 0;
 unsigned int stream_id = 0;
 void *buf;
+bool is_in;
+bool allow_short = false;
+bool allow_zero = false;
 unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
 USBDEVFS_URB_BULK_CONTINUATION |
 USBDEVFS_URB_NO_FSBR |

@@ -1326,6 +1329,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 u = 0;
 switch (uurb->type) {
 case USBDEVFS_URB_TYPE_CONTROL:
+if (is_in)
+allow_short = true;
 if (!usb_endpoint_xfer_control(&ep->desc))
 return -EINVAL;
 /* min 8 byte setup packet */

@@ -1366,6 +1371,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 break;

 case USBDEVFS_URB_TYPE_BULK:
+if (!is_in)
+allow_zero = true;
+else
+allow_short = true;
 switch (usb_endpoint_type(&ep->desc)) {
 case USB_ENDPOINT_XFER_CONTROL:
 case USB_ENDPOINT_XFER_ISOC:

@@ -1386,6 +1395,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 if (!usb_endpoint_xfer_int(&ep->desc))
 return -EINVAL;
 interrupt_urb:
+if (!is_in)
+allow_zero = true;
+else
+allow_short = true;
 break;

 case USBDEVFS_URB_TYPE_ISO:

@@ -1512,16 +1525,21 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
 if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
 u |= URB_ISO_ASAP;
-if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
 u |= URB_SHORT_NOT_OK;
 if (uurb->flags & USBDEVFS_URB_NO_FSBR)
 u |= URB_NO_FSBR;
-if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
 u |= URB_ZERO_PACKET;
 if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
 u |= URB_NO_INTERRUPT;
 as->urb->transfer_flags = u;

+if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
+dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
+if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
+
 as->urb->transfer_buffer_length = uurb->buffer_length;
 as->urb->setup_packet = (unsigned char *)dr;
 dr = NULL;

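The usbdevfs hunks above only honour the SHORT_NOT_OK / ZERO_PACKET flags where they make sense for the endpoint type and direction, and warn otherwise. A stand-alone C sketch of that flag-gating logic (hypothetical flag and type names, not the usbdevfs ABI):

#include <stdbool.h>

#define FLAG_SHORT_NOT_OK  0x01u
#define FLAG_ZERO_PACKET   0x02u

enum xfer_type { XFER_CONTROL, XFER_BULK, XFER_INTERRUPT, XFER_ISO };

/* Decide which user-supplied flags make sense for this transfer type and
 * direction; everything else is silently dropped (a real implementation
 * could also log a warning, as the patch does). */
static unsigned int sanitize_flags(enum xfer_type type, bool is_in,
				   unsigned int user_flags)
{
	bool allow_short = false, allow_zero = false;
	unsigned int out = 0;

	switch (type) {
	case XFER_CONTROL:
		allow_short = is_in;
		break;
	case XFER_BULK:
	case XFER_INTERRUPT:
		allow_short = is_in;   /* short reads only matter inbound */
		allow_zero = !is_in;   /* zero-length packets only outbound */
		break;
	default:
		break;
	}

	if (allow_short && (user_flags & FLAG_SHORT_NOT_OK))
		out |= FLAG_SHORT_NOT_OK;
	if (allow_zero && (user_flags & FLAG_ZERO_PACKET))
		out |= FLAG_ZERO_PACKET;
	return out;
}
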
@@ -509,7 +509,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
 struct device *dev;
 struct usb_device *udev;
 int retval = 0;
-int lpm_disable_error = -ENODEV;

 if (!iface)
 return -ENODEV;

@@ -530,16 +529,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,

 iface->condition = USB_INTERFACE_BOUND;

-/* See the comment about disabling LPM in usb_probe_interface(). */
-if (driver->disable_hub_initiated_lpm) {
-lpm_disable_error = usb_unlocked_disable_lpm(udev);
-if (lpm_disable_error) {
-dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
-__func__, driver->name);
-return -ENOMEM;
-}
-}
-
 /* Claimed interfaces are initially inactive (suspended) and
 * runtime-PM-enabled, but only if the driver has autosuspend
 * support. Otherwise they are marked active, to prevent the

@@ -558,9 +547,20 @@ int usb_driver_claim_interface(struct usb_driver *driver,
 if (device_is_registered(dev))
 retval = device_bind_driver(dev);

-/* Attempt to re-enable USB3 LPM, if the disable was successful. */
-if (!lpm_disable_error)
-usb_unlocked_enable_lpm(udev);
+if (retval) {
+dev->driver = NULL;
+usb_set_intfdata(iface, NULL);
+iface->needs_remote_wakeup = 0;
+iface->condition = USB_INTERFACE_UNBOUND;
+
+/*
+ * Unbound interfaces are always runtime-PM-disabled
+ * and runtime-PM-suspended
+ */
+if (driver->supports_autosuspend)
+pm_runtime_disable(dev);
+pm_runtime_set_suspended(dev);
+}

 return retval;
 }

@@ -95,6 +95,8 @@ struct usb_host_interface *usb_find_alt_setting(
 struct usb_interface_cache *intf_cache = NULL;
 int i;

+if (!config)
+return NULL;
 for (i = 0; i < config->desc.bNumInterfaces; i++) {
 if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
 == iface_num) {

@@ -1066,12 +1066,15 @@ static struct usb_gadget_ops fotg210_gadget_ops = {
 static int fotg210_udc_remove(struct platform_device *pdev)
 {
 struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
+int i;

 usb_del_gadget_udc(&fotg210->gadget);
 iounmap(fotg210->reg);
 free_irq(platform_get_irq(pdev, 0), fotg210);

 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+kfree(fotg210->ep[i]);
 kfree(fotg210);

 return 0;

@@ -1102,7 +1105,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
 /* initialize udc */
 fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
 if (fotg210 == NULL)
-goto err_alloc;
+goto err;

 for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
 _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);

@@ -1114,7 +1117,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
 fotg210->reg = ioremap(res->start, resource_size(res));
 if (fotg210->reg == NULL) {
 pr_err("ioremap error.\n");
-goto err_map;
+goto err_alloc;
 }

 spin_lock_init(&fotg210->lock);

@@ -1162,7 +1165,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
 fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
 GFP_KERNEL);
 if (fotg210->ep0_req == NULL)
-goto err_req;
+goto err_map;

 fotg210_init(fotg210);

@@ -1190,12 +1193,14 @@ err_req:
 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);

 err_map:
-if (fotg210->reg)
-iounmap(fotg210->reg);
+iounmap(fotg210->reg);

 err_alloc:
+for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+kfree(fotg210->ep[i]);
 kfree(fotg210);

+err:
 return ret;
 }

@@ -431,6 +431,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
 spin_unlock_irqrestore(&dev->lock, flags);
 mutex_unlock(&dev->io_mutex);

+if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
+return -EIO;
+
 return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }

@@ -408,12 +408,20 @@ static int kobil_tiocmget(struct tty_struct *tty)
 transfer_buffer_length,
 KOBIL_TIMEOUT);

-dev_dbg(&port->dev, "%s - Send get_status_line_state URB returns: %i. Statusline: %02x\n",
-__func__, result, transfer_buffer[0]);
+dev_dbg(&port->dev, "Send get_status_line_state URB returns: %i\n",
+result);
+if (result < 1) {
+if (result >= 0)
+result = -EIO;
+goto out_free;
+}
+
+dev_dbg(&port->dev, "Statusline: %02x\n", transfer_buffer[0]);

 result = 0;
 if ((transfer_buffer[0] & SUSBCR_GSL_DSR) != 0)
 result = TIOCM_DSR;
+out_free:
 kfree(transfer_buffer);
 return result;
 }

@@ -230,7 +230,7 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,

 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
 0, secd, sizeof(*secd));
-if (result < sizeof(*secd)) {
+if (result < (int)sizeof(*secd)) {
 dev_err(dev, "Can't read security descriptor or "
 "not enough data: %d\n", result);
 goto out;

@@ -875,6 +875,7 @@ error_get_version:
 error_rc_add:
 usb_put_intf(iface);
 usb_put_dev(hwarc->usb_dev);
+kfree(hwarc);
 error_alloc:
 uwb_rc_put(uwb_rc);
 error_rc_alloc:

@@ -18,15 +18,16 @@ static void enable_hotplug_cpu(int cpu)

 static void disable_hotplug_cpu(int cpu)
 {
-if (cpu_online(cpu)) {
-lock_device_hotplug();
+if (!cpu_is_hotpluggable(cpu))
+return;
+lock_device_hotplug();
+if (cpu_online(cpu))
 device_offline(get_cpu_device(cpu));
-unlock_device_hotplug();
-}
-if (cpu_present(cpu))
+if (!cpu_online(cpu) && cpu_present(cpu)) {
 xen_arch_unregister_cpu(cpu);
-
-set_cpu_present(cpu, false);
+set_cpu_present(cpu, false);
+}
+unlock_device_hotplug();
 }

 static int vcpu_online(unsigned int cpu)

@@ -139,7 +139,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
 clear_evtchn_to_irq_row(row);
 }

-evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+evtchn_to_irq[row][col] = irq;
 return 0;
 }

@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
 /*
 * The Xenstore watch fires directly after registering it and
 * after a suspend/resume cycle. So ENOENT is no error but
-* might happen in those cases.
+* might happen in those cases. ERANGE is observed when we get
+* an empty value (''), this happens when we acknowledge the
+* request by writing '\0' below.
 */
-if (err != -ENOENT)
+if (err != -ENOENT && err != -ERANGE)
 pr_err("Error %d reading sysrq code in control/sysrq\n",
 err);
 xenbus_transaction_end(xbt, 1);

@@ -101,9 +101,6 @@ convert_sfm_char(const __u16 src_char, char *target)
 case SFM_LESSTHAN:
 *target = '<';
 break;
-case SFM_SLASH:
-*target = '\\';
-break;
 case SFM_SPACE:
 *target = ' ';
 break;

@@ -577,10 +577,15 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
 }

 count = 0;
+/*
+ * We know that all the name entries in the protocols array
+ * are short (< 16 bytes anyway) and are NUL terminated.
+ */
 for (i = 0; i < CIFS_NUM_PROT; i++) {
-strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
-count += strlen(protocols[i].name) + 1;
-/* null at end of source and target buffers anyway */
+size_t len = strlen(protocols[i].name) + 1;
+
+memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
+count += len;
 }
 inc_rfc1001_len(pSMB, count);
 pSMB->ByteCount = cpu_to_le16(count);

@@ -406,9 +406,17 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
 (struct smb_com_transaction_change_notify_rsp *)buf;
 struct file_notify_information *pnotify;
 __u32 data_offset = 0;
+size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

 if (get_bcc(buf) > sizeof(struct file_notify_information)) {
 data_offset = le32_to_cpu(pSMBr->DataOffset);
+
+if (data_offset >
+len - sizeof(struct file_notify_information)) {
+cifs_dbg(FYI, "invalid data_offset %u\n",
+data_offset);
+return true;
+}
 pnotify = (struct file_notify_information *)
 ((char *)&pSMBr->hdr.Protocol + data_offset);
 cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",

@@ -914,7 +914,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 }

 srch_inf->entries_in_buffer = 0;
-srch_inf->index_of_last_entry = 0;
+srch_inf->index_of_last_entry = 2;

 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
 fid->volatile_fid, 0, srch_inf);

@@ -1412,6 +1412,11 @@ retry:
 /* Find the entry best suited to be pushed into EA block */
 entry = NULL;
 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+/* never move system.data out of the inode */
+if ((last->e_name_len == 4) &&
+(last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+!memcmp(last->e_name, "data", 4))
+continue;
 total_size =
 EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
 EXT4_XATTR_LEN(last->e_name_len);

@@ -1632,6 +1632,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
 if (status) {
 op = &args->ops[0];
 op->status = status;
+resp->opcnt = 1;
 goto encode_op;
 }

@@ -589,9 +589,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,

 res->last_used = 0;

-spin_lock(&dlm->spinlock);
+spin_lock(&dlm->track_lock);
 list_add_tail(&res->tracking, &dlm->tracking_list);
-spin_unlock(&dlm->spinlock);
+spin_unlock(&dlm->track_lock);

 memset(res->lvb, 0, DLM_LVB_LEN);
 memset(res->refmap, 0, sizeof(res->refmap));

@@ -472,6 +472,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
 int err;
 int i;

+/*
+ * The ability to racily run the kernel stack unwinder on a running task
+ * and then observe the unwinder output is scary; while it is useful for
+ * debugging kernel issues, it can also allow an attacker to leak kernel
+ * stack contents.
+ * Doing this in a manner that is at least safe from races would require
+ * some work to ensure that the remote task can not be scheduled; and
+ * even then, this would still expose the unwinder as local attack
+ * surface.
+ * Therefore, this interface is restricted to root.
+ */
+if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+return -EACCES;
+
 entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
 if (!entries)
 return -ENOMEM;

@@ -1,7 +1,7 @@
 /*
 * Driver for Texas Instruments INA219, INA226 power monitor chips
 *
-* Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+* Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as

@@ -67,7 +67,8 @@ struct kmem_cache {
 int size; /* The size of an object including meta data */
 int object_size; /* The size of an object without meta data */
 int offset; /* Free pointer offset. */
-int cpu_partial; /* Number of per cpu partial objects to keep around */
+/* Number of per cpu partial objects to keep around */
+unsigned int cpu_partial;
 struct kmem_cache_order_objects oo;

 /* Allocation and freeing of slabs */

@@ -43,6 +43,7 @@ struct v4l2_fh {
 wait_queue_head_t wait;
 struct list_head subscribed; /* Subscribed events */
 struct list_head available; /* Dequeueable event */
+struct mutex subscribe_lock;
 unsigned int navailable;
 u32 sequence;

@@ -3860,7 +3860,7 @@ static unsigned long mod_find_symname(struct module *mod, const char *name)

 for (i = 0; i < kallsyms->num_symtab; i++)
 if (strcmp(name, symname(kallsyms, i)) == 0 &&
-kallsyms->symtab[i].st_info != 'U')
+kallsyms->symtab[i].st_shndx != SHN_UNDEF)
 return kallsyms->symtab[i].st_value;
 return 0;
 }

@@ -3906,6 +3906,10 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
 if (mod->state == MODULE_STATE_UNFORMED)
 continue;
 for (i = 0; i < kallsyms->num_symtab; i++) {
+
+if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
+continue;
+
 ret = fn(data, symname(kallsyms, i),
 mod, kallsyms->symtab[i].st_value);
 if (ret != 0)

@@ -773,7 +773,8 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
 /* Convert (if necessary) to absolute time */
 if (flags != TIMER_ABSTIME) {
 ktime_t now = alarm_bases[type].gettime();
-exp = ktime_add(now, exp);
+
+exp = ktime_add_safe(now, exp);
 }

 if (alarmtimer_do_nsleep(&alarm, exp))

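The alarmtimer hunk above switches the relative-timeout conversion from a plain addition to a saturating one, so a huge user-supplied timeout cannot overflow and wrap into the past (ktime_add_safe clamps to KTIME_MAX in the kernel). A minimal sketch of the same idea in portable C, assuming non-negative inputs as with clock readings and timeouts:

#include <stdint.h>

/* Saturating 64-bit addition: instead of relying on wraparound (which is
 * undefined behaviour for signed overflow), clamp to INT64_MAX so an
 * absurdly large relative timeout becomes "effectively never" rather than
 * a time in the past. */
static int64_t add_saturated(int64_t now, int64_t delta)
{
	if (delta > 0 && now > INT64_MAX - delta)
		return INT64_MAX;
	return now + delta;
}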