Merge android-4.4.107 (79f138a) into msm-4.4

* refs/heads/tmp-79f138a
  Linux 4.4.107
  ath9k: fix tx99 potential info leak
  IB/ipoib: Grab rtnl lock on heavy flush when calling ndo_open/stop
  RDMA/cma: Avoid triggering undefined behavior
  macvlan: Only deliver one copy of the frame to the macvlan interface
  udf: Avoid overflow when session starts at large offset
  scsi: bfa: integer overflow in debugfs
  scsi: sd: change allow_restart to bool in sysfs interface
  scsi: sd: change manage_start_stop to bool in sysfs interface
  vt6655: Fix a possible sleep-in-atomic bug in vt6655_suspend
  scsi: scsi_devinfo: Add REPORTLUN2 to EMC SYMMETRIX blacklist entry
  raid5: Set R5_Expanded on parity devices as well as data.
  pinctrl: adi2: Fix Kconfig build problem
  usb: musb: da8xx: fix babble condition handling
  tty fix oops when rmmod 8250
  powerpc/perf/hv-24x7: Fix incorrect comparison in memord
  scsi: hpsa: destroy sas transport properties before scsi_host
  scsi: hpsa: cleanup sas_phy structures in sysfs when unloading
  PCI: Detach driver before procfs & sysfs teardown on device remove
  xfs: fix incorrect extent state in xfs_bmap_add_extent_unwritten_real
  xfs: fix log block underflow during recovery cycle verification
  l2tp: cleanup l2tp_tunnel_delete calls
  bcache: fix wrong cache_misses statistics
  bcache: explicitly destroy mutex while exiting
  GFS2: Take inode off order_write list when setting jdata flag
  thermal/drivers/step_wise: Fix temperature regulation misbehavior
  ppp: Destroy the mutex when cleanup
  clk: tegra: Fix cclk_lp divisor register
  clk: imx6: refine hdmi_isfr's parent to make HDMI work on i.MX6 SoCs w/o VPU
  clk: mediatek: add the option for determining PLL source clock
  mm: Handle 0 flags in _calc_vm_trans() macro
  crypto: tcrypt - fix buffer lengths in test_aead_speed()
  arm-ccn: perf: Prevent module unload while PMU is in use
  target/file: Do not return error for UNMAP if length is zero
  target:fix condition return in core_pr_dump_initiator_port()
  iscsi-target: fix memory leak in lio_target_tiqn_addtpg()
  target/iscsi: Fix a race condition in iscsit_add_reject_from_cmd()
  powerpc/ipic: Fix status get and status clear
  powerpc/opal: Fix EBUSY bug in acquiring tokens
  netfilter: ipvs: Fix inappropriate output of procfs
  powerpc/powernv/cpufreq: Fix the frequency read by /proc/cpuinfo
  PCI/PME: Handle invalid data when reading Root Status
  dmaengine: ti-dma-crossbar: Correct am335x/am43xx mux value type
  rtc: pcf8563: fix output clock rate
  video: fbdev: au1200fb: Return an error code if a memory allocation fails
  video: fbdev: au1200fb: Release some resources if a memory allocation fails
  video: udlfb: Fix read EDID timeout
  fbdev: controlfb: Add missing modes to fix out of bounds access
  sfc: don't warn on successful change of MAC
  target: fix race during implicit transition work flushes
  target: fix ALUA transition timeout handling
  target: Use system workqueue for ALUA transitions
  btrfs: add missing memset while reading compressed inline extents
  NFSv4.1 respect server's max size in CREATE_SESSION
  efi/esrt: Cleanup bad memory map log messages
  perf symbols: Fix symbols__fixup_end heuristic for corner cases
  net/mlx4_core: Avoid delays during VF driver device shutdown
  afs: Fix afs_kill_pages()
  afs: Fix page leak in afs_write_begin()
  afs: Populate and use client modification time
  afs: Fix the maths in afs_fs_store_data()
  afs: Prevent callback expiry timer overflow
  afs: Migrate vlocation fields to 64-bit
  afs: Flush outstanding writes when an fd is closed
  afs: Adjust mode bits processing
  afs: Populate group ID from vnode status
  afs: Fix missing put_page()
  drm/radeon: reinstate oland workaround for sclk
  mmc: mediatek: Fixed bug where clock frequency could be set wrong
  sched/deadline: Use deadline instead of period when calculating overflow
  sched/deadline: Throttle a constrained deadline task activated after the deadline
  sched/deadline: Make sure the replenishment timer fires in the next period
  drm/radeon/si: add dpm quirk for Oland
  fjes: Fix wrong netdevice feature flags
  scsi: hpsa: limit outstanding rescans
  scsi: hpsa: update check for logical volume status
  openrisc: fix issue handling 8 byte get_user calls
  intel_th: pci: Add Gemini Lake support
  mlxsw: reg: Fix SPVMLR max record count
  mlxsw: reg: Fix SPVM max record count
  net: Resend IGMP memberships upon peer notification.
  dmaengine: Fix array index out of bounds warning in __get_unmap_pool()
  net: wimax/i2400m: fix NULL-deref at probe
  writeback: fix memory leak in wb_queue_work()
  netfilter: bridge: honor frag_max_size when refragmenting
  drm/omap: fix dmabuf mmap for dma_alloc'ed buffers
  Input: i8042 - add TUXEDO BU1406 (N24_25BU) to the nomux list
  NFSD: fix nfsd_reset_versions for NFSv4.
  NFSD: fix nfsd_minorversion(.., NFSD_AVAIL)
  net: bcmgenet: Power up the internal PHY before probing the MII
  net: bcmgenet: power down internal phy if open or resume fails
  net: bcmgenet: reserved phy revisions must be checked first
  net: bcmgenet: correct MIB access of UniMAC RUNT counters
  net: bcmgenet: correct the RBUF_OVFL_CNT and RBUF_ERR_CNT MIB values
  net: initialize msg.msg_flags in recvfrom
  userfaultfd: selftest: vm: allow to build in vm/ directory
  userfaultfd: shmem: __do_fault requires VM_FAULT_NOPAGE
  md-cluster: free md_cluster_info if node leave cluster
  usb: phy: isp1301: Add OF device ID table
  mac80211: Fix addition of mesh configuration element
  KEYS: add missing permission check for request_key() destination
  ext4: fix crash when a directory's i_size is too small
  ext4: fix fdatasync(2) after fallocate(2) operation
  dmaengine: dmatest: move callback wait queue to thread context
  sched/rt: Do not pull from current CPU if only one CPU to pull
  xhci: Don't add a virt_dev to the devs array before it's fully allocated
  Bluetooth: btusb: driver to enable the usb-wakeup feature
  ceph: drop negative child dentries before try pruning inode's alias
  usbip: fix stub_send_ret_submit() vulnerability to null transfer_buffer
  USB: core: prevent malicious bNumInterfaces overflow
  USB: uas and storage: Add US_FL_BROKEN_FUA for another JMicron JMS567 ID
  tracing: Allocate mask_str buffer dynamically
  autofs: fix careless error in recent commit
  crypto: salsa20 - fix blkcipher_walk API usage
  crypto: hmac - require that the underlying hash algorithm is unkeyed
  UPSTREAM: arm64: setup: introduce kaslr_offset()
  UPSTREAM: kcov: fix comparison callback signature
  UPSTREAM: kcov: support comparison operands collection
  UPSTREAM: kcov: remove pointless current != NULL check
  UPSTREAM: kcov: support compat processes
  UPSTREAM: kcov: simplify interrupt check
  UPSTREAM: kcov: make kcov work properly with KASLR enabled
  UPSTREAM: kcov: add more missing includes
  UPSTREAM: kcov: add missing #include <linux/sched.h>
  UPSTREAM: kcov: properly check if we are in an interrupt
  UPSTREAM: kcov: don't profile branches in kcov
  UPSTREAM: kcov: don't trace the code coverage code
  BACKPORT: kernel: add kcov code coverage

Conflicts:
	Makefile
	mm/kasan/Makefile
	scripts/Makefile.lib

Change-Id: Ic19953706ea2e700621b0ba94d1c90bbffa4f471
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Srinivasarao P <spathi@codeaurora.org>  2018-01-09 16:05:02 +05:30
commit 2d309c994d
141 changed files with 1473 additions and 317 deletions

Documentation/kcov.txt (new file, 111 lines)

@ -0,0 +1,111 @@
kcov: code coverage for fuzzing
===============================
kcov exposes kernel code coverage information in a form suitable for coverage-
guided fuzzing (randomized testing). Coverage data of a running kernel is
exported via the "kcov" debugfs file. Coverage collection is enabled on a task
basis, and thus it can capture precise coverage of a single system call.
Note that kcov does not aim to collect as much coverage as possible. It aims
to collect more or less stable coverage that is a function of syscall inputs.
To achieve this goal it does not collect coverage in soft/hard interrupts,
and instrumentation of some inherently non-deterministic parts of the kernel
is disabled (e.g. the scheduler and locking).
Usage:
======
Configure kernel with:
CONFIG_KCOV=y
CONFIG_KCOV requires gcc built from revision 231296 or later.
Profiling data will only become accessible once debugfs has been mounted:
mount -t debugfs none /sys/kernel/debug
The following program demonstrates kcov usage from within a test program:
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
#define KCOV_ENABLE _IO('c', 100)
#define KCOV_DISABLE _IO('c', 101)
#define COVER_SIZE (64<<10)
int main(int argc, char **argv)
{
int fd;
unsigned long *cover, n, i;
/* A single file descriptor allows coverage collection on a single
 * thread.
 */
fd = open("/sys/kernel/debug/kcov", O_RDWR);
if (fd == -1)
perror("open"), exit(1);
/* Setup trace mode and trace size. */
if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
perror("ioctl"), exit(1);
/* Mmap buffer shared between kernel- and user-space. */
cover = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if ((void*)cover == MAP_FAILED)
perror("mmap"), exit(1);
/* Enable coverage collection on the current thread. */
if (ioctl(fd, KCOV_ENABLE, 0))
perror("ioctl"), exit(1);
/* Reset coverage from the tail of the ioctl() call. */
__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
/* That's the target syscall. */
read(-1, NULL, 0);
/* Read number of PCs collected. */
n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
for (i = 0; i < n; i++)
printf("0x%lx\n", cover[i + 1]);
/* Disable coverage collection for the current thread. After this call
* coverage can be enabled for a different thread.
*/
if (ioctl(fd, KCOV_DISABLE, 0))
perror("ioctl"), exit(1);
/* Free resources. */
if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
perror("munmap"), exit(1);
if (close(fd))
perror("close"), exit(1);
return 0;
}
After piping through addr2line, the output of the program looks as follows:
SyS_read
fs/read_write.c:562
__fdget_pos
fs/file.c:774
__fget_light
fs/file.c:746
__fget_light
fs/file.c:750
__fget_light
fs/file.c:760
__fdget_pos
fs/file.c:784
SyS_read
fs/read_write.c:562
If a program needs to collect coverage from several threads (independently),
it needs to open /sys/kernel/debug/kcov in each thread separately.
The interface is fine-grained to allow efficient forking of test processes.
That is, a parent process opens /sys/kernel/debug/kcov, enables trace mode,
mmaps coverage buffer and then forks child processes in a loop. Child processes
only need to enable coverage (disable happens automatically on thread end).
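As a concrete illustration of that forking flow, here is a minimal sketch of
such a harness (not part of this patch; it reuses the ioctl and COVER_SIZE
definitions from the demo program above, and the four child processes and the
read(-1, NULL, 0) test input are arbitrary choices):

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <fcntl.h>
#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
#define KCOV_ENABLE _IO('c', 100)
#define COVER_SIZE (64<<10)
int main(void)
{
	unsigned long *cover;
	int fd, i;
	/* Parent does the one-time setup: open kcov, select trace mode,
	 * and map the buffer shared between kernel- and user-space.
	 */
	fd = open("/sys/kernel/debug/kcov", O_RDWR);
	if (fd == -1)
		perror("open"), exit(1);
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		perror("ioctl"), exit(1);
	cover = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if ((void*)cover == MAP_FAILED)
		perror("mmap"), exit(1);
	for (i = 0; i < 4; i++) {
		pid_t pid = fork();
		if (pid == -1)
			perror("fork"), exit(1);
		if (pid == 0) {
			/* Child: only enable collection and run one test
			 * input; disable happens automatically on exit.
			 */
			if (ioctl(fd, KCOV_ENABLE, 0))
				perror("ioctl"), exit(1);
			__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
			read(-1, NULL, 0); /* the syscall under test */
			printf("child %d collected %lu PCs\n", i,
			       __atomic_load_n(&cover[0], __ATOMIC_RELAXED));
			exit(0);
		}
		waitpid(pid, NULL, 0);
	}
	return 0;
}

Because the buffer is mapped MAP_SHARED, the parent could equally read the
collected PCs after waitpid() returns instead of printing in the child.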


@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 106
SUBLEVEL = 107
EXTRAVERSION =
NAME = Blurry Fish Butt
@ -373,6 +373,7 @@ LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im
CFLAGS_KCOV = -fsanitize-coverage=trace-pc
# Use USERINCLUDE when you must reference the UAPI directories only.
@ -420,7 +421,7 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KASAN CFLAGS_UBSAN
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@ -697,6 +698,14 @@ endif
endif
KBUILD_CFLAGS += $(stackp-flag)
ifdef CONFIG_KCOV
ifeq ($(call cc-option, $(CFLAGS_KCOV)),)
$(warning Cannot use CONFIG_KCOV: \
-fsanitize-coverage=trace-pc is not supported by compiler)
CFLAGS_KCOV =
endif
endif
ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
CLANG_TRIPLE ?= $(CROSS_COMPILE)


@ -148,6 +148,11 @@ extern u64 kimage_vaddr;
/* the offset between the kernel virtual and physical mappings */
extern u64 kimage_voffset;
static inline unsigned long kaslr_offset(void)
{
return kimage_vaddr - KIMAGE_VADDR;
}
/*
* Allow all memory at the discovery stage. We will clip it later.
*/


@ -429,11 +429,11 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
void *p)
{
u64 const kaslr_offset = kimage_vaddr - KIMAGE_VADDR;
const unsigned long offset = kaslr_offset();
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset > 0) {
pr_emerg("Kernel Offset: 0x%llx from 0x%lx\n",
kaslr_offset, KIMAGE_VADDR);
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
offset, KIMAGE_VADDR);
} else {
pr_emerg("Kernel Offset: disabled\n");
}


@ -318,11 +318,14 @@ config BF53x
config GPIO_ADI
def_bool y
depends on !PINCTRL
depends on (BF51x || BF52x || BF53x || BF538 || BF539 || BF561)
config PINCTRL
config PINCTRL_BLACKFIN_ADI2
def_bool y
depends on BF54x || BF60x
depends on (BF54x || BF60x)
select PINCTRL
select PINCTRL_ADI2
config MEM_MT48LC64M4A2FB_7E
bool


@ -17,6 +17,7 @@ config DEBUG_VERBOSE
config DEBUG_MMRS
tristate "Generate Blackfin MMR tree"
depends on !PINCTRL
select DEBUG_FS
help
Create a tree of Blackfin MMRs via the debugfs tree. If


@ -215,7 +215,7 @@ do { \
case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \
case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
case 8: __get_user_asm2(x, ptr, retval); \
case 8: __get_user_asm2(x, ptr, retval); break; \
default: (x) = __get_user_bad(); \
} \
} while (0)


@ -514,7 +514,7 @@ static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
{
if (s1 < s2)
return 1;
if (s2 > s1)
if (s1 > s2)
return -1;
return memcmp(d1, d2, s1);


@ -39,18 +39,18 @@ int __opal_async_get_token(void)
int token;
spin_lock_irqsave(&opal_async_comp_lock, flags);
token = find_first_bit(opal_async_complete_map, opal_max_async_tokens);
token = find_first_zero_bit(opal_async_token_map, opal_max_async_tokens);
if (token >= opal_max_async_tokens) {
token = -EBUSY;
goto out;
}
if (__test_and_set_bit(token, opal_async_token_map)) {
if (!__test_and_clear_bit(token, opal_async_complete_map)) {
token = -EBUSY;
goto out;
}
__clear_bit(token, opal_async_complete_map);
__set_bit(token, opal_async_token_map);
out:
spin_unlock_irqrestore(&opal_async_comp_lock, flags);


@ -295,7 +295,7 @@ static unsigned long pnv_get_proc_freq(unsigned int cpu)
{
unsigned long ret_freq;
ret_freq = cpufreq_quick_get(cpu) * 1000ul;
ret_freq = cpufreq_get(cpu) * 1000ul;
/*
* If the backend cpufreq driver does not exist,


@ -845,12 +845,12 @@ void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
u32 ipic_get_mcp_status(void)
{
return ipic_read(primary_ipic->regs, IPIC_SERMR);
return ipic_read(primary_ipic->regs, IPIC_SERSR);
}
void ipic_clear_mcp_status(u32 mask)
{
ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
ipic_write(primary_ipic->regs, IPIC_SERSR, mask);
}
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */


@ -27,6 +27,7 @@ config X86
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_MMIO_FLUSH
select ARCH_HAS_SG_CHAIN


@ -11,6 +11,13 @@
KASAN_SANITIZE := n
# Kernel does not boot with kcov instrumentation here.
# One of the problems observed was insertion of __sanitizer_cov_trace_pc()
# callback into middle of per-cpu data enabling code. Thus the callback observed
# inconsistent state and crashed. We are interested mostly in syscall coverage,
# so boot code is not interesting anyway.
KCOV_INSTRUMENT := n
# If you want to preset the SVGA mode, uncomment the next line and
# set SVGA_MODE to whatever number you want.
# Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode.


@ -18,6 +18,9 @@
KASAN_SANITIZE := n
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4


@ -59,13 +59,6 @@ static int encrypt(struct blkcipher_desc *desc,
salsa20_ivsetup(ctx, walk.iv);
if (likely(walk.nbytes == nbytes))
{
salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
walk.dst.virt.addr, nbytes);
return blkcipher_walk_done(desc, &walk, 0);
}
while (walk.nbytes >= 64) {
salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
walk.dst.virt.addr,


@ -5,6 +5,9 @@
KBUILD_CFLAGS += $(DISABLE_LTO)
KASAN_SANITIZE := n
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
VDSO64-$(CONFIG_X86_64) := y
VDSOX32-$(CONFIG_X86_X32_ABI) := y
VDSO32-$(CONFIG_X86_32) := y


@ -2,6 +2,10 @@
# Makefile for local APIC drivers and for the IO-APIC code
#
# Leads to non-deterministic coverage that is not a function of syscall inputs.
# In particular, smp_apic_timer_interrupt() is called in random places.
KCOV_INSTRUMENT := n
obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_noop.o ipi.o vector.o
obj-y += hw_nmi.o


@ -8,6 +8,10 @@ CFLAGS_REMOVE_common.o = -pg
CFLAGS_REMOVE_perf_event.o = -pg
endif
# If these files are instrumented, boot hangs during the first second.
KCOV_INSTRUMENT_common.o := n
KCOV_INSTRUMENT_perf_event.o := n
# Make sure load_percpu_segment has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_common.o := $(nostackp)


@ -2,6 +2,9 @@
# Makefile for x86 specific library files.
#
# Produces uninteresting flaky coverage.
KCOV_INSTRUMENT_delay.o := n
inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
quiet_cmd_inat_tables = GEN $@


@ -1,3 +1,6 @@
# Kernel does not boot with instrumentation of tlb.c.
KCOV_INSTRUMENT_tlb.o := n
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
pat.o pgtable.o physaddr.o gup.o setup_nx.o


@ -8,6 +8,9 @@
#
KASAN_SANITIZE := n
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
always := realmode.bin realmode.relocs
wakeup-objs := wakeup_asm.o wakemain.o video-mode.o


@ -194,11 +194,15 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
salg = shash_attr_alg(tb[1], 0, 0);
if (IS_ERR(salg))
return PTR_ERR(salg);
alg = &salg->base;
/* The underlying hash algorithm must be unkeyed */
err = -EINVAL;
if (crypto_shash_alg_has_setkey(salg))
goto out_put_alg;
ds = salg->digestsize;
ss = salg->statesize;
alg = &salg->base;
if (ds > alg->cra_blocksize ||
ss < alg->cra_blocksize)
goto out_put_alg;


@ -188,13 +188,6 @@ static int encrypt(struct blkcipher_desc *desc,
salsa20_ivsetup(ctx, walk.iv);
if (likely(walk.nbytes == nbytes))
{
salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
walk.src.virt.addr, nbytes);
return blkcipher_walk_done(desc, &walk, 0);
}
while (walk.nbytes >= 64) {
salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
walk.src.virt.addr,


@ -24,11 +24,12 @@
static const struct crypto_type crypto_shash_type;
static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(shash_no_setkey);
static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)


@ -410,7 +410,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
}
sg_init_aead(sg, xbuf,
*b_size + (enc ? authsize : 0));
*b_size + (enc ? 0 : authsize));
sg_init_aead(sgout, xoutbuf,
*b_size + (enc ? authsize : 0));
@ -418,7 +418,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
sg_set_buf(&sg[0], assoc, aad_size);
sg_set_buf(&sgout[0], assoc, aad_size);
aead_request_set_crypt(req, sg, sgout, *b_size, iv);
aead_request_set_crypt(req, sg, sgout,
*b_size + (enc ? 0 : authsize),
iv);
aead_request_set_ad(req, aad_size);
if (secs)


@ -1050,6 +1050,10 @@ static int btusb_open(struct hci_dev *hdev)
return err;
data->intf->needs_remote_wakeup = 1;
/* device specific wakeup source enabled and required for USB
* remote wakeup while host is suspended
*/
device_wakeup_enable(&data->udev->dev);
if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
goto done;
@ -1113,6 +1117,7 @@ static int btusb_close(struct hci_dev *hdev)
goto failed;
data->intf->needs_remote_wakeup = 0;
device_wakeup_disable(&data->udev->dev);
usb_autopm_put_interface(data->intf);
failed:


@ -1260,6 +1260,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* Perf driver registration */
ccn->dt.pmu = (struct pmu) {
.module = THIS_MODULE,
.attr_groups = arm_ccn_pmu_attr_groups,
.task_ctx_nr = perf_invalid_context,
.event_init = arm_ccn_pmu_event_init,


@ -419,7 +419,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26);
clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0);
clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "video_27m", base + 0x70, 4);
clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "mipi_core_cfg", base + 0x70, 4);
clk[IMX6QDL_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6);
clk[IMX6QDL_CLK_I2C2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8);
clk[IMX6QDL_CLK_I2C3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10);


@ -174,6 +174,7 @@ struct mtk_pll_data {
uint32_t pcw_reg;
int pcw_shift;
const struct mtk_pll_div_table *div_table;
const char *parent_name;
};
void mtk_clk_register_plls(struct device_node *node,


@ -302,7 +302,10 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
init.name = data->name;
init.ops = &mtk_pll_ops;
init.parent_names = &parent_name;
if (data->parent_name)
init.parent_names = &data->parent_name;
else
init.parent_names = &parent_name;
init.num_parents = 1;
clk = clk_register(NULL, &pll->hw);


@ -1063,7 +1063,7 @@ static void __init tegra30_super_clk_init(void)
* U71 divider of cclk_lp.
*/
clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3",
clk_base + SUPER_CCLKG_DIVIDER, 0,
clk_base + SUPER_CCLKLP_DIVIDER, 0,
TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL);


@ -1023,12 +1023,14 @@ static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
switch (order) {
case 0 ... 1:
return &unmap_pool[0];
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
case 2 ... 4:
return &unmap_pool[1];
case 5 ... 7:
return &unmap_pool[2];
case 8:
return &unmap_pool[3];
#endif
default:
BUG();
return NULL;


@ -148,6 +148,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
#define PATTERN_OVERWRITE 0x20
#define PATTERN_COUNT_MASK 0x1f
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
bool done;
wait_queue_head_t *wait;
};
struct dmatest_thread {
struct list_head node;
struct dmatest_info *info;
@ -156,6 +162,8 @@ struct dmatest_thread {
u8 **srcs;
u8 **dsts;
enum dma_transaction_type type;
wait_queue_head_t done_wait;
struct dmatest_done test_done;
bool done;
};
@ -316,18 +324,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
return error_count;
}
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
bool done;
wait_queue_head_t *wait;
};
static void dmatest_callback(void *arg)
{
struct dmatest_done *done = arg;
done->done = true;
wake_up_all(done->wait);
struct dmatest_thread *thread =
container_of(arg, struct dmatest_thread, done_wait);
if (!thread->done) {
done->done = true;
wake_up_all(done->wait);
} else {
/*
* If thread->done, it means that this callback occurred
* after the parent thread has cleaned up. This can
* happen in the case that driver doesn't implement
* the terminate_all() functionality and a dma operation
* did not occur within the timeout period
*/
WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
}
}
static unsigned int min_odd(unsigned int x, unsigned int y)
@ -398,9 +413,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
*/
static int dmatest_func(void *data)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
struct dmatest_thread *thread = data;
struct dmatest_done done = { .wait = &done_wait };
struct dmatest_done *done = &thread->test_done;
struct dmatest_info *info;
struct dmatest_params *params;
struct dma_chan *chan;
@ -605,9 +619,9 @@ static int dmatest_func(void *data)
continue;
}
done.done = false;
done->done = false;
tx->callback = dmatest_callback;
tx->callback_param = &done;
tx->callback_param = done;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
@ -620,21 +634,12 @@ static int dmatest_func(void *data)
}
dma_async_issue_pending(chan);
wait_event_freezable_timeout(done_wait, done.done,
wait_event_freezable_timeout(thread->done_wait, done->done,
msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
if (!done.done) {
/*
* We're leaving the timed out dma operation with
* dangling pointer to done_wait. To make this
* correct, we'll need to allocate wait_done for
* each test iteration and perform "who's gonna
* free it this time?" dancing. For now, just
* leave it dangling.
*/
WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
if (!done->done) {
dmaengine_unmap_put(um);
result("test timed out", total_tests, src_off, dst_off,
len, 0);
@ -708,7 +713,7 @@ err_thread_type:
dmatest_KBs(runtime, total_len), ret);
/* terminate all transfers on specified channels */
if (ret)
if (ret || failed_tests)
dmaengine_terminate_all(chan);
thread->done = true;
@ -766,6 +771,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
thread->info = info;
thread->chan = dtc->chan;
thread->type = type;
thread->test_done.wait = &thread->done_wait;
init_waitqueue_head(&thread->done_wait);
smp_wmb();
thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
dma_chan_name(chan), op, i);


@ -46,12 +46,12 @@ struct ti_am335x_xbar_data {
struct ti_am335x_xbar_map {
u16 dma_line;
u16 mux_val;
u8 mux_val;
};
static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val)
static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
writeb_relaxed(val & 0x1f, iomem + event);
writeb_relaxed(val, iomem + event);
}
static void ti_am335x_xbar_free(struct device *dev, void *route_data)
@ -102,7 +102,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
}
map->dma_line = (u16)dma_spec->args[0];
map->mux_val = (u16)dma_spec->args[2];
map->mux_val = (u8)dma_spec->args[2];
dma_spec->args[2] = 0;
dma_spec->args_count = 2;


@ -312,7 +312,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
early_memunmap(md, sizeof (*md));
}
pr_err_once("requested map not found.\n");
return -ENOENT;
}


@ -253,7 +253,7 @@ void __init efi_esrt_init(void)
rc = efi_mem_desc_lookup(efi.esrt, &md);
if (rc < 0) {
pr_err("ESRT header is not in the memory map.\n");
pr_warn("ESRT header is not in the memory map.\n");
return;
}


@ -24,6 +24,9 @@ GCOV_PROFILE := n
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
lib-y := efi-stub-helper.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64


@ -142,9 +142,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
struct drm_gem_object *obj = buffer->priv;
int ret = 0;
if (WARN_ON(!obj->filp))
return -EINVAL;
ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
if (ret < 0)
return ret;


@ -3029,6 +3029,16 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
max_sclk = 75000;
max_mclk = 80000;
}
} else if (rdev->family == CHIP_OLAND) {
if ((rdev->pdev->revision == 0xC7) ||
(rdev->pdev->revision == 0x80) ||
(rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
(rdev->pdev->revision == 0x87) ||
(rdev->pdev->device == 0x6604) ||
(rdev->pdev->device == 0x6605)) {
max_sclk = 75000;
}
}
/* Apply dpm quirks */
while (p && p->chip_device != 0) {


@ -82,6 +82,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
.driver_data = (kernel_ulong_t)0,
},
{
/* Gemini Lake */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
.driver_data = (kernel_ulong_t)0,
},
{ 0 },
};


@ -1353,7 +1353,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
return id_priv;
}
static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
{
return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}
@ -1731,7 +1731,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
struct rdma_id_private *listen_id, *conn_id;
struct rdma_cm_event event;
struct net_device *net_dev;
int offset, ret;
u8 offset;
int ret;
listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
if (IS_ERR(listen_id))
@ -3118,7 +3119,8 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
struct ib_cm_sidr_req_param req;
struct ib_cm_id *id;
void *private_data;
int offset, ret;
u8 offset;
int ret;
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
@ -3175,7 +3177,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
struct rdma_route *route;
void *private_data;
struct ib_cm_id *id;
int offset, ret;
u8 offset;
int ret;
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);


@ -1044,10 +1044,15 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
rtnl_lock();
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_ib_dev_stop(dev);
if (ipoib_ib_dev_open(dev) != 0)
result = ipoib_ib_dev_open(dev);
rtnl_unlock();
if (result)
return;
if (netif_queue_stopped(dev))
netif_start_queue(dev);
}


@ -520,6 +520,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
},
},
{
/* TUXEDO BU1406 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
},
},
{ }
};


@ -468,6 +468,7 @@ struct search {
unsigned recoverable:1;
unsigned write:1;
unsigned read_dirty_data:1;
unsigned cache_missed:1;
unsigned long start_time;
@ -653,6 +654,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->orig_bio = bio;
s->cache_miss = NULL;
s->cache_missed = 0;
s->d = d;
s->recoverable = 1;
s->write = (bio->bi_rw & REQ_WRITE) != 0;
@ -776,7 +778,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
bch_mark_cache_accounting(s->iop.c, s->d,
!s->cache_miss, s->iop.bypass);
!s->cache_missed, s->iop.bypass);
trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
if (s->iop.error)
@ -795,6 +797,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct bio *miss, *cache_bio;
s->cache_missed = 1;
if (s->cache_miss || s->iop.bypass) {
miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
ret = miss == bio ? MAP_DONE : MAP_CONTINUE;


@ -2083,6 +2083,7 @@ static void bcache_exit(void)
if (bcache_major)
unregister_blkdev(bcache_major, "bcache");
unregister_reboot_notifier(&reboot);
mutex_destroy(&bch_register_lock);
}
static int __init bcache_init(void)
@ -2101,14 +2102,15 @@ static int __init bcache_init(void)
bcache_major = register_blkdev(0, "bcache");
if (bcache_major < 0) {
unregister_reboot_notifier(&reboot);
mutex_destroy(&bch_register_lock);
return bcache_major;
}
if (!(bcache_wq = create_workqueue("bcache")) ||
!(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
sysfs_create_files(bcache_kobj, files) ||
bch_request_init() ||
bch_debug_init(bcache_kobj))
bch_debug_init(bcache_kobj) ||
sysfs_create_files(bcache_kobj, files))
goto err;
return 0;


@ -821,6 +821,7 @@ static int leave(struct mddev *mddev)
lockres_free(cinfo->no_new_dev_lockres);
lockres_free(cinfo->bitmap_lockres);
dlm_release_lockspace(cinfo->lockspace, 2);
kfree(cinfo);
return 0;
}


@ -1681,8 +1681,11 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
struct r5dev *dev = &sh->dev[i];
if (dev->written || i == pd_idx || i == qd_idx) {
if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) {
set_bit(R5_UPTODATE, &dev->flags);
if (test_bit(STRIPE_EXPAND_READY, &sh->state))
set_bit(R5_Expanded, &dev->flags);
}
if (fua)
set_bit(R5_WantFUA, &dev->flags);
if (sync)


@ -570,7 +570,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
}
}
sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
(mode << 8) | (div % 0xff));
(mode << 8) | div);
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
@ -1540,7 +1540,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
mmc->f_min = host->src_clk_freq / (4 * 255);
mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
mmc->caps |= MMC_CAP_RUNTIME_RESUME;


@ -1,7 +1,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
* Copyright (c) 2014 Broadcom Corporation
* Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@ -778,8 +778,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
/* Misc UniMAC counters */
STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
UMAC_RBUF_OVFL_CNT),
STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
UMAC_RBUF_OVFL_CNT_V1),
STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
UMAC_RBUF_ERR_CNT_V1),
STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
@ -821,6 +822,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
}
}
static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
u16 new_offset;
u32 val;
switch (offset) {
case UMAC_RBUF_OVFL_CNT_V1:
if (GENET_IS_V2(priv))
new_offset = RBUF_OVFL_CNT_V2;
else
new_offset = RBUF_OVFL_CNT_V3PLUS;
val = bcmgenet_rbuf_readl(priv, new_offset);
/* clear if overflowed */
if (val == ~0)
bcmgenet_rbuf_writel(priv, 0, new_offset);
break;
case UMAC_RBUF_ERR_CNT_V1:
if (GENET_IS_V2(priv))
new_offset = RBUF_ERR_CNT_V2;
else
new_offset = RBUF_ERR_CNT_V3PLUS;
val = bcmgenet_rbuf_readl(priv, new_offset);
/* clear if overflowed */
if (val == ~0)
bcmgenet_rbuf_writel(priv, 0, new_offset);
break;
default:
val = bcmgenet_umac_readl(priv, offset);
/* clear if overflowed */
if (val == ~0)
bcmgenet_umac_writel(priv, 0, offset);
break;
}
return val;
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
int i, j = 0;
@ -836,19 +876,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
case BCMGENET_STAT_NETDEV:
case BCMGENET_STAT_SOFT:
continue;
case BCMGENET_STAT_MIB_RX:
case BCMGENET_STAT_MIB_TX:
case BCMGENET_STAT_RUNT:
if (s->type != BCMGENET_STAT_MIB_RX)
offset = BCMGENET_STAT_OFFSET;
offset += BCMGENET_STAT_OFFSET;
/* fall through */
case BCMGENET_STAT_MIB_TX:
offset += BCMGENET_STAT_OFFSET;
/* fall through */
case BCMGENET_STAT_MIB_RX:
val = bcmgenet_umac_readl(priv,
UMAC_MIB_START + j + offset);
offset = 0; /* Reset Offset */
break;
case BCMGENET_STAT_MISC:
val = bcmgenet_umac_readl(priv, s->reg_offset);
/* clear if overflowed */
if (val == ~0)
bcmgenet_umac_writel(priv, 0, s->reg_offset);
if (GENET_IS_V1(priv)) {
val = bcmgenet_umac_readl(priv, s->reg_offset);
/* clear if overflowed */
if (val == ~0)
bcmgenet_umac_writel(priv, 0,
s->reg_offset);
} else {
val = bcmgenet_update_stat_misc(priv,
s->reg_offset);
}
break;
}
@ -2901,6 +2950,8 @@ err_irq0:
err_fini_dma:
bcmgenet_fini_dma(priv);
err_clk_disable:
if (priv->internal_phy)
bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
clk_disable_unprepare(priv->clk);
return ret;
}
@ -3277,6 +3328,12 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
*/
gphy_rev = reg & 0xffff;
/* This is reserved so should require special treatment */
if (gphy_rev == 0 || gphy_rev == 0x01ff) {
pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
return;
}
/* This is the good old scheme, just GPHY major, no minor nor patch */
if ((gphy_rev & 0xf0) != 0)
priv->gphy_rev = gphy_rev << 8;
@ -3285,12 +3342,6 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
else if ((gphy_rev & 0xff00) != 0)
priv->gphy_rev = gphy_rev;
/* This is reserved so should require special treatment */
else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
return;
}
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!(params->flags & GENET_HAS_40BITS))
pr_warn("GENET does not support 40-bits PA\n");
@ -3333,6 +3384,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
const void *macaddr;
struct resource *r;
int err = -EIO;
const char *phy_mode_str;
/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
@ -3438,6 +3490,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->clk_eee = NULL;
}
/* If this is an internal GPHY, power it on now, before UniMAC is
* brought out of reset as absolutely no UniMAC activity is allowed
*/
if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
!strcasecmp(phy_mode_str, "internal"))
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
err = reset_umac(priv);
if (err)
goto err_clk_disable;
@ -3604,6 +3663,8 @@ static int bcmgenet_resume(struct device *d)
return 0;
out_clk_disable:
if (priv->internal_phy)
bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
clk_disable_unprepare(priv->clk);
return ret;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014 Broadcom Corporation
* Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@ -214,7 +214,9 @@ struct bcmgenet_mib_counters {
#define MDIO_REG_SHIFT 16
#define MDIO_REG_MASK 0x1F
#define UMAC_RBUF_OVFL_CNT 0x61C
#define UMAC_RBUF_OVFL_CNT_V1 0x61C
#define RBUF_OVFL_CNT_V2 0x80
#define RBUF_OVFL_CNT_V3PLUS 0x94
#define UMAC_MPD_CTRL 0x620
#define MPD_EN (1 << 0)
@ -224,7 +226,9 @@ struct bcmgenet_mib_counters {
#define UMAC_MPD_PW_MS 0x624
#define UMAC_MPD_PW_LS 0x628
#define UMAC_RBUF_ERR_CNT 0x634
#define UMAC_RBUF_ERR_CNT_V1 0x634
#define RBUF_ERR_CNT_V2 0x84
#define RBUF_ERR_CNT_V3PLUS 0x98
#define UMAC_MDF_ERR_CNT 0x638
#define UMAC_MDF_CTRL 0x650
#define UMAC_MDF_ADDR 0x654


@ -2278,6 +2278,17 @@ static int sync_toggles(struct mlx4_dev *dev)
rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
/* PCI might be offline */
/* If device removal has been requested,
* do not continue retrying.
*/
if (dev->persist->interface_state &
MLX4_INTERFACE_STATE_NOWAIT) {
mlx4_warn(dev,
"communication channel is offline\n");
return -EIO;
}
msleep(100);
wr_toggle = swab32(readl(&priv->mfunc.comm->
slave_write));


@ -1763,6 +1763,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev)
(u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
if (!offline_bit)
return 0;
/* If device removal has been requested,
* do not continue retrying.
*/
if (dev->persist->interface_state &
MLX4_INTERFACE_STATE_NOWAIT)
break;
/* There are cases as part of AER/Reset flow that PF needs
* around 100 msec to load. We therefore sleep for 100 msec
* to allow other tasks to make use of that CPU during this
@ -3690,6 +3698,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
struct mlx4_priv *priv = mlx4_priv(dev);
int active_vfs = 0;
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
mutex_lock(&persist->interface_state_mutex);
persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
mutex_unlock(&persist->interface_state_mutex);


@ -599,7 +599,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
#define MLXSW_REG_SPVM_ID 0x200F
#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
@ -1139,7 +1139,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
#define MLXSW_REG_SPVMLR_ID 0x2020
#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
MLXSW_REG_SPVMLR_REC_LEN * \
MLXSW_REG_SPVMLR_REC_MAX_COUNT)


@ -4307,7 +4307,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
* MCFW do not support VFs.
*/
rc = efx_ef10_vport_set_mac_address(efx);
} else {
} else if (rc) {
efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
sizeof(inbuf), NULL, 0, rc);
}


@ -1205,7 +1205,7 @@ static void fjes_netdev_setup(struct net_device *netdev)
fjes_set_ethtool_ops(netdev);
netdev->mtu = fjes_support_mtu[0];
netdev->flags |= IFF_BROADCAST;
netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
static void fjes_irq_watch_task(struct work_struct *work)


@ -441,7 +441,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
struct macvlan_dev, list);
else
vlan = macvlan_hash_lookup(port, eth->h_dest);
if (vlan == NULL)
if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE)
return RX_HANDLER_PASS;
dev = vlan->dev;


@ -942,6 +942,7 @@ static __net_exit void ppp_exit_net(struct net *net)
unregister_netdevice_many(&list);
rtnl_unlock();
mutex_destroy(&pn->all_ppp_mutex);
idr_destroy(&pn->units_idr);
}


@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface,
struct i2400mu *i2400mu;
struct usb_device *usb_dev = interface_to_usbdev(iface);
if (iface->cur_altsetting->desc.bNumEndpoints < 4)
return -ENODEV;
if (usb_dev->speed != USB_SPEED_HIGH)
dev_err(dev, "device not connected as high speed\n");


@ -180,6 +180,9 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
ssize_t len;
int r;
if (count < 1)
return -EINVAL;
if (sc->cur_chan->nvifs > 1)
return -EOPNOTSUPP;
@ -187,6 +190,8 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (strtobool(buf, &start))
return -EINVAL;


@ -233,6 +233,9 @@ static void pcie_pme_work_fn(struct work_struct *work)
break;
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
if (rtsta == (u32) ~0)
break;
if (rtsta & PCI_EXP_RTSTA_PME) {
/*
* Clear PME status of the port. If there are other
@ -280,7 +283,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
spin_lock_irqsave(&data->lock, flags);
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
if (!(rtsta & PCI_EXP_RTSTA_PME)) {
if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) {
spin_unlock_irqrestore(&data->lock, flags);
return IRQ_NONE;
}


@ -20,9 +20,9 @@ static void pci_stop_dev(struct pci_dev *dev)
pci_pme_active(dev, false);
if (dev->is_added) {
device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
device_release_driver(&dev->dev);
dev->is_added = 0;
}


@ -26,7 +26,8 @@ config DEBUG_PINCTRL
config PINCTRL_ADI2
bool "ADI pin controller driver"
depends on BLACKFIN
depends on (BF54x || BF60x)
depends on !GPIO_ADI
select PINMUX
select IRQ_DOMAIN
help


@ -427,7 +427,7 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw,
return 0;
buf &= PCF8563_REG_CLKO_F_MASK;
return clkout_rates[ret];
return clkout_rates[buf];
}
static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,


@ -254,7 +254,8 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
struct bfad_s *bfad = port->bfad;
struct bfa_s *bfa = &bfad->bfa;
struct bfa_ioc_s *ioc = &bfa->ioc;
int addr, len, rc, i;
int addr, rc, i;
u32 len;
u32 *regbuf;
void __iomem *rb, *reg_addr;
unsigned long flags;
@ -265,7 +266,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
return PTR_ERR(kern_buf);
rc = sscanf(kern_buf, "%x:%x", &addr, &len);
if (rc < 2) {
if (rc < 2 || len > (UINT_MAX >> 2)) {
printk(KERN_INFO
"bfad[%d]: %s failed to read user buf\n",
bfad->inst_no, __func__);


@ -3466,7 +3466,7 @@ exit_failed:
* # (integer code indicating one of several NOT READY states
* describing why a volume is to be kept offline)
*/
static int hpsa_volume_offline(struct ctlr_info *h,
static unsigned char hpsa_volume_offline(struct ctlr_info *h,
unsigned char scsi3addr[])
{
struct CommandList *c;
@ -3486,7 +3486,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
if (rc) {
cmd_free(h, c);
return 0;
return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}
sense = c->err_info->SenseInfo;
if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
@ -3497,19 +3497,13 @@ static int hpsa_volume_offline(struct ctlr_info *h,
cmd_status = c->err_info->CommandStatus;
scsi_status = c->err_info->ScsiStatus;
cmd_free(h, c);
/* Is the volume 'not ready'? */
if (cmd_status != CMD_TARGET_STATUS ||
scsi_status != SAM_STAT_CHECK_CONDITION ||
sense_key != NOT_READY ||
asc != ASC_LUN_NOT_READY) {
return 0;
}
/* Determine the reason for not ready state */
ldstat = hpsa_get_volume_status(h, scsi3addr);
/* Keep volume offline in certain cases: */
switch (ldstat) {
case HPSA_LV_FAILED:
case HPSA_LV_UNDERGOING_ERASE:
case HPSA_LV_NOT_AVAILABLE:
case HPSA_LV_UNDERGOING_RPI:
@ -3531,7 +3525,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
default:
break;
}
return 0;
return HPSA_LV_OK;
}
/*
@ -3615,10 +3609,10 @@ static int hpsa_update_device_info(struct ctlr_info *h,
/* Do an inquiry to the device to see what it is. */
if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
/* Inquiry failed (msg printed already) */
dev_err(&h->pdev->dev,
"hpsa_update_device_info: inquiry failed\n");
rc = -EIO;
"%s: inquiry failed, device will be skipped.\n",
__func__);
rc = HPSA_INQUIRY_FAILED;
goto bail_out;
}
@ -3638,15 +3632,19 @@ static int hpsa_update_device_info(struct ctlr_info *h,
if (this_device->devtype == TYPE_DISK &&
is_logical_dev_addr_mode(scsi3addr)) {
int volume_offline;
unsigned char volume_offline;
hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
hpsa_get_ioaccel_status(h, scsi3addr, this_device);
volume_offline = hpsa_volume_offline(h, scsi3addr);
if (volume_offline < 0 || volume_offline > 0xff)
volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
this_device->volume_offline = volume_offline & 0xff;
if (volume_offline == HPSA_LV_FAILED) {
rc = HPSA_LV_FAILED;
dev_err(&h->pdev->dev,
"%s: LV failed, device will be skipped.\n",
__func__);
goto bail_out;
}
} else {
this_device->raid_level = RAID_UNKNOWN;
this_device->offload_config = 0;
@ -4115,8 +4113,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
goto out;
}
if (rc) {
dev_warn(&h->pdev->dev,
"Inquiry failed, skipping device.\n");
h->drv_req_rescan = 1;
continue;
}
@ -5257,7 +5254,7 @@ static void hpsa_scan_complete(struct ctlr_info *h)
spin_lock_irqsave(&h->scan_lock, flags);
h->scan_finished = 1;
wake_up_all(&h->scan_wait_queue);
wake_up(&h->scan_wait_queue);
spin_unlock_irqrestore(&h->scan_lock, flags);
}
@ -5275,11 +5272,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
if (unlikely(lockup_detected(h)))
return hpsa_scan_complete(h);
/*
* If a scan is already waiting to run, no need to add another
*/
spin_lock_irqsave(&h->scan_lock, flags);
if (h->scan_waiting) {
spin_unlock_irqrestore(&h->scan_lock, flags);
return;
}
spin_unlock_irqrestore(&h->scan_lock, flags);
/* wait until any scan already in progress is finished. */
while (1) {
spin_lock_irqsave(&h->scan_lock, flags);
if (h->scan_finished)
break;
h->scan_waiting = 1;
spin_unlock_irqrestore(&h->scan_lock, flags);
wait_event(h->scan_wait_queue, h->scan_finished);
/* Note: We don't need to worry about a race between this
@ -5289,6 +5298,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
*/
}
h->scan_finished = 0; /* mark scan as in progress */
h->scan_waiting = 0;
spin_unlock_irqrestore(&h->scan_lock, flags);
if (unlikely(lockup_detected(h)))
@ -8505,6 +8515,7 @@ reinit_after_soft_reset:
init_waitqueue_head(&h->event_sync_wait_queue);
mutex_init(&h->reset_mutex);
h->scan_finished = 1; /* no scan currently in progress */
h->scan_waiting = 0;
pci_set_drvdata(pdev, h);
h->ndevices = 0;
@ -8797,6 +8808,8 @@ static void hpsa_remove_one(struct pci_dev *pdev)
destroy_workqueue(h->rescan_ctlr_wq);
destroy_workqueue(h->resubmit_wq);
hpsa_delete_sas_host(h);
/*
* Call before disabling interrupts.
* scsi_remove_host can trigger I/O operations especially
@ -8831,8 +8844,6 @@ static void hpsa_remove_one(struct pci_dev *pdev)
h->lockup_detected = NULL; /* init_one 2 */
/* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
hpsa_delete_sas_host(h);
kfree(h); /* init_one 1 */
}
@ -9324,9 +9335,9 @@ static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
struct sas_phy *phy = hpsa_sas_phy->phy;
sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
sas_phy_free(phy);
if (hpsa_sas_phy->added_to_port)
list_del(&hpsa_sas_phy->phy_list_entry);
sas_phy_delete(phy);
kfree(hpsa_sas_phy);
}


@ -200,6 +200,7 @@ struct ctlr_info {
dma_addr_t errinfo_pool_dhandle;
unsigned long *cmd_pool_bits;
int scan_finished;
u8 scan_waiting : 1;
spinlock_t scan_lock;
wait_queue_head_t scan_wait_queue;


@ -155,6 +155,7 @@
#define CFGTBL_BusType_Fibre2G 0x00000200l
/* VPD Inquiry types */
#define HPSA_INQUIRY_FAILED 0x02
#define HPSA_VPD_SUPPORTED_PAGES 0x00
#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1
#define HPSA_VPD_LV_IOACCEL_STATUS 0xC2
@ -164,6 +165,7 @@
/* Logical volume states */
#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff
#define HPSA_LV_OK 0x0
#define HPSA_LV_FAILED 0x01
#define HPSA_LV_NOT_AVAILABLE 0x0b
#define HPSA_LV_UNDERGOING_ERASE 0x0F
#define HPSA_LV_UNDERGOING_RPI 0x12


@ -160,7 +160,7 @@ static struct {
{"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
{"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
{"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN},
{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
{"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
{"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
{"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},


@ -233,11 +233,15 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
bool v;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
if (kstrtobool(buf, &v))
return -EINVAL;
sdp->manage_start_stop = v;
return count;
}
@ -255,6 +259,7 @@ static ssize_t
allow_restart_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
bool v;
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@ -264,7 +269,10 @@ allow_restart_store(struct device *dev, struct device_attribute *attr,
if (sdp->type != TYPE_DISK)
return -EINVAL;
sdp->allow_restart = simple_strtoul(buf, NULL, 10);
if (kstrtobool(buf, &v))
return -EINVAL;
sdp->allow_restart = v;
return count;
}


@ -1693,10 +1693,11 @@ static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state)
MACbShutdown(priv->PortOffset);
pci_disable_device(pcid);
pci_set_power_state(pcid, pci_choose_state(pcid, state));
spin_unlock_irqrestore(&priv->lock, flags);
pci_set_power_state(pcid, pci_choose_state(pcid, state));
return 0;
}


@ -674,6 +674,7 @@ static int iscsit_add_reject_from_cmd(
unsigned char *buf)
{
struct iscsi_conn *conn;
const bool do_put = cmd->se_cmd.se_tfo != NULL;
if (!cmd->conn) {
pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@ -704,7 +705,7 @@ static int iscsit_add_reject_from_cmd(
* Perform the kref_put now if se_cmd has already been setup by
* scsit_setup_scsi_cmd()
*/
if (cmd->se_cmd.se_tfo != NULL) {
if (do_put) {
pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
target_put_sess_cmd(&cmd->se_cmd);
}


@ -1210,7 +1210,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
if (ret < 0)
return NULL;
goto free_out;
ret = iscsit_tpg_add_portal_group(tiqn, tpg);
if (ret != 0)
@ -1222,6 +1222,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
return &tpg->tpg_se_tpg;
out:
core_tpg_deregister(&tpg->tpg_se_tpg);
free_out:
kfree(tpg);
return NULL;
}


@@ -1010,7 +1010,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
 static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 {
     struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-        struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+        struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
     struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
     bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
              ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
@@ -1073,17 +1073,8 @@ static int core_alua_do_transition_tg_pt(
     /*
      * Flush any pending transitions
      */
-    if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
-        atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
-        ALUA_ACCESS_STATE_TRANSITION) {
-        /* Just in case */
-        tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-        tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-        flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
-        wait_for_completion(&wait);
-        tg_pt_gp->tg_pt_gp_transition_complete = NULL;
-        return 0;
-    }
+    if (!explicit)
+        flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
     /*
      * Save the old primary ALUA access state, and set the current state
@@ -1114,17 +1105,9 @@ static int core_alua_do_transition_tg_pt(
     atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
     spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-    if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
-        unsigned long transition_tmo;
-
-        transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
-        queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-                   &tg_pt_gp->tg_pt_gp_transition_work,
-                   transition_tmo);
-    } else {
+    schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
+    if (explicit) {
         tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-        queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-                   &tg_pt_gp->tg_pt_gp_transition_work, 0);
         wait_for_completion(&wait);
         tg_pt_gp->tg_pt_gp_transition_complete = NULL;
     }
@@ -1692,8 +1675,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
     mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
     spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
     atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-    INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
-              core_alua_do_transition_tg_pt_work);
+    INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+          core_alua_do_transition_tg_pt_work);
     tg_pt_gp->tg_pt_gp_dev = dev;
     atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
            ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
@@ -1801,7 +1784,7 @@ void core_alua_free_tg_pt_gp(
     dev->t10_alua.alua_tg_pt_gps_counter--;
     spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-    flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+    flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
     /*
      * Allow a struct t10_alua_tg_pt_gp_member * referenced by


@@ -466,6 +466,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
     struct inode *inode = file->f_mapping->host;
     int ret;
 
+    if (!nolb) {
+        return 0;
+    }
+
     if (cmd->se_dev->dev_attrib.pi_prot_type) {
         ret = fd_do_prot_unmap(cmd, lba, nolb);
         if (ret)


@@ -56,8 +56,10 @@ void core_pr_dump_initiator_port(
     char *buf,
     u32 size)
 {
-    if (!pr_reg->isid_present_at_reg)
+    if (!pr_reg->isid_present_at_reg) {
         buf[0] = '\0';
+        return;
+    }
 
     snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
 }
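
Without the early return, the !isid_present_at_reg branch fell through to snprintf() and fabricated an ",i,0x..." suffix from a stale pr_reg_isid. A runnable userspace sketch of the corrected contract (illustrative names):

/* userspace sketch, not the target-core code */
#include <stdbool.h>
#include <stdio.h>

static void dump_port(bool isid_present, const char *isid,
                      char *buf, size_t size)
{
    if (!isid_present) {
        buf[0] = '\0';      /* caller always gets a terminated string */
        return;             /* and never a fabricated ",i,0x..." suffix */
    }
    snprintf(buf, size, ",i,0x%s", isid);
}

int main(void)
{
    char buf[32];

    dump_port(false, "deadbeef", buf, sizeof(buf));
    printf("[%s]\n", buf);  /* [] */
    dump_port(true, "deadbeef", buf, sizeof(buf));
    printf("[%s]\n", buf);  /* [,i,0xdeadbeef] */
    return 0;
}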


@@ -31,8 +31,7 @@
  *    If the temperature is higher than a trip point,
  *       a. if the trend is THERMAL_TREND_RAISING, use higher cooling
  *          state for this trip point
- *       b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
- *          state for this trip point
+ *       b. if the trend is THERMAL_TREND_DROPPING, do nothing
  *       c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit
  *          for this trip point
  *       d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
@@ -94,9 +93,11 @@ static unsigned long get_target_state(struct thermal_instance *instance,
             if (!throttle)
                 next_target = THERMAL_NO_TARGET;
         } else {
-            next_target = cur_state - 1;
-            if (next_target > instance->upper)
-                next_target = instance->upper;
+            if (!throttle) {
+                next_target = cur_state - 1;
+                if (next_target > instance->upper)
+                    next_target = instance->upper;
+            }
         }
         break;
     case THERMAL_TREND_DROP_FULL:
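
The misbehavior being fixed: on a dropping trend the governor stepped the cooling state down even while the trip point was still throttling, so the device oscillated between states. Stepping down only once throttling has stopped holds the state steady. A userspace sketch of the decision (simplified, not the governor code):

/* userspace sketch of the step_wise decision */
#include <stdio.h>

#define NO_TARGET (~0UL)

static unsigned long next_state(unsigned long cur, unsigned long lower,
                                unsigned long upper, int throttle)
{
    unsigned long next = cur;

    if (cur <= lower) {
        if (!throttle)
            next = NO_TARGET;
    } else if (!throttle) {     /* the added condition */
        next = cur - 1;
        if (next > upper)
            next = upper;
    }
    return next;
}

int main(void)
{
    printf("%lu\n", next_state(3, 0, 5, 1));    /* 3: still throttling, hold */
    printf("%lu\n", next_state(3, 0, 5, 0));    /* 2: step down */
    return 0;
}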


@@ -521,6 +521,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
     unsigned iad_num = 0;
 
     memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
+    nintf = nintf_orig = config->desc.bNumInterfaces;
+    config->desc.bNumInterfaces = 0;    // Adjusted later
+
     if (config->desc.bDescriptorType != USB_DT_CONFIG ||
         config->desc.bLength < USB_DT_CONFIG_SIZE ||
         config->desc.bLength > size) {
@@ -534,7 +537,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
     buffer += config->desc.bLength;
     size -= config->desc.bLength;
 
-    nintf = nintf_orig = config->desc.bNumInterfaces;
     if (nintf > USB_MAXINTERFACES) {
         dev_warn(ddev, "config %d has too many interfaces: %d, "
             "using maximum allowed: %d\n",


@@ -1017,10 +1017,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
         return 0;
     }
 
-    xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
-    if (!xhci->devs[slot_id])
+    dev = kzalloc(sizeof(*dev), flags);
+    if (!dev)
         return 0;
-    dev = xhci->devs[slot_id];
 
     /* Allocate the (output) device context that will be used in the HC. */
     dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
@@ -1068,9 +1067,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
          &xhci->dcbaa->dev_context_ptrs[slot_id],
          le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
 
+    xhci->devs[slot_id] = dev;
+
     return 1;
 fail:
-    xhci_free_virt_device(xhci, slot_id);
+    if (dev->in_ctx)
+        xhci_free_container_ctx(xhci, dev->in_ctx);
+    if (dev->out_ctx)
+        xhci_free_container_ctx(xhci, dev->out_ctx);
+    kfree(dev);
+
     return 0;
 }
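
The fix builds the device in a local pointer and publishes it into xhci->devs[] only after every allocation succeeds, so the failure path frees exactly what this function created and no other context can see a half-initialised entry. A userspace sketch of the allocate-locally, publish-last pattern (illustrative names):

/* userspace sketch, not the xHCI code */
#include <stdlib.h>

struct vdev { void *in_ctx, *out_ctx; };
static struct vdev *devs[16];

static int alloc_virt_device(int slot, int fail)
{
    struct vdev *dev = calloc(1, sizeof(*dev));
    if (!dev)
        return 0;

    dev->out_ctx = malloc(8);
    if (!dev->out_ctx || fail)
        goto fail;

    devs[slot] = dev;           /* publish last */
    return 1;
fail:
    free(dev->out_ctx);         /* free only what we own; devs[slot] untouched */
    free(dev);
    return 0;
}

int main(void)
{
    alloc_virt_device(1, 1);    /* devs[1] stays NULL on failure */
    return !alloc_virt_device(2, 0);
}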


@@ -350,7 +350,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
             musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
             portstate(musb->port1_status |= USB_PORT_STAT_POWER);
             del_timer(&otg_workaround);
-        } else {
+        } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
+            /*
+             * When babble condition happens, drvvbus interrupt
+             * is also generated. Ignore this drvvbus interrupt
+             * and let babble interrupt handler recovers the
+             * controller; otherwise, the host-mode flag is lost
+             * due to the MUSB_DEV_MODE() call below and babble
+             * recovery logic will not be called.
+             */
             musb->is_active = 0;
             MUSB_DEV_MODE(musb);
             otg->default_a = 0;
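
A userspace sketch of the idea in the comment above: a spurious secondary event must not clear state that the primary event's recovery path depends on (illustrative names and flag values):

/* userspace sketch, not the MUSB code */
#include <stdio.h>

#define INTR_DRVVBUS 0x01
#define INTR_BABBLE  0x02

static int is_host = 1;

static void irq_handler(unsigned int status)
{
    if ((status & INTR_DRVVBUS) && !(status & INTR_BABBLE))
        is_host = 0;    /* genuine vbus change: leave host mode */
    if (status & INTR_BABBLE)
        printf("babble: recovering, is_host=%d\n", is_host);
}

int main(void)
{
    irq_handler(INTR_DRVVBUS | INTR_BABBLE);    /* host flag preserved */
    return !is_host;
}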


@@ -33,6 +33,12 @@ static const struct i2c_device_id isp1301_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, isp1301_id);
 
+static const struct of_device_id isp1301_of_match[] = {
+    { .compatible = "nxp,isp1301" },
+    { },
+};
+MODULE_DEVICE_TABLE(of, isp1301_of_match);
+
 static struct i2c_client *isp1301_i2c_client;
 
 static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear)
@@ -130,6 +136,7 @@ static int isp1301_remove(struct i2c_client *client)
 static struct i2c_driver isp1301_driver = {
     .driver = {
         .name = DRV_NAME,
+        .of_match_table = of_match_ptr(isp1301_of_match),
     },
     .probe = isp1301_probe,
     .remove = isp1301_remove,


@@ -2149,6 +2149,13 @@ UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
         USB_SC_DEVICE, USB_PR_DEVICE, NULL,
         US_FL_NO_REPORT_OPCODES),
 
+/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+        "JMicron",
+        "JMS567",
+        USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+        US_FL_BROKEN_FUA),
+
 /*
  * Patch by Constantin Baranov <const@tltsu.ru>
  * Report by Andreas Koenecke.


@@ -141,6 +141,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
         USB_SC_DEVICE, USB_PR_DEVICE, NULL,
         US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
 
+/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+        "JMicron",
+        "JMS567",
+        USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+        US_FL_BROKEN_FUA),
+
 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
 UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
         "VIA",


@@ -178,6 +178,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
         memset(&pdu_header, 0, sizeof(pdu_header));
         memset(&msg, 0, sizeof(msg));
 
+        if (urb->actual_length > 0 && !urb->transfer_buffer) {
+            dev_err(&sdev->udev->dev,
+                "urb: actual_length %d transfer_buffer null\n",
+                urb->actual_length);
+            return -1;
+        }
+
         if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
             iovnum = 2 + urb->number_of_packets;
         else
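
The check rejects a length/buffer combination that a compromised peer could supply, before the buffer is dereferenced to build the outgoing iovecs. A userspace sketch (illustrative struct, not the usbip code):

/* userspace sketch of the consistency check */
#include <stddef.h>
#include <stdio.h>

struct urb { int actual_length; void *transfer_buffer; };

static int send_ret_submit(const struct urb *urb)
{
    if (urb->actual_length > 0 && !urb->transfer_buffer) {
        fprintf(stderr, "urb: actual_length %d transfer_buffer null\n",
                urb->actual_length);
        return -1;
    }
    return 0;   /* safe to reference the buffer from here on */
}

int main(void)
{
    struct urb bad = { .actual_length = 512, .transfer_buffer = NULL };

    return send_ret_submit(&bad) == -1 ? 0 : 1;
}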


@@ -1680,8 +1680,10 @@ static int au1200fb_drv_probe(struct platform_device *dev)
         fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
                     &dev->dev);
-        if (!fbi)
+        if (!fbi) {
+            ret = -ENOMEM;
             goto failed;
+        }
 
         _au1200fb_infos[plane] = fbi;
         fbdev = fbi->par;
@@ -1699,7 +1701,8 @@ static int au1200fb_drv_probe(struct platform_device *dev)
         if (!fbdev->fb_mem) {
             print_err("fail to allocate frambuffer (size: %dK))",
                   fbdev->fb_len / 1024);
-            return -ENOMEM;
+            ret = -ENOMEM;
+            goto failed;
         }
 
         /*


@@ -141,5 +141,7 @@ static struct max_cmodes control_mac_modes[] = {
     {{ 1, 2}},  /* 1152x870, 75Hz */
     {{ 0, 1}},  /* 1280x960, 75Hz */
     {{ 0, 1}},  /* 1280x1024, 75Hz */
+    {{ 1, 2}},  /* 1152x768, 60Hz */
+    {{ 0, 1}},  /* 1600x1024, 60Hz */
 };


@@ -769,11 +769,11 @@ static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len)
     for (i = 0; i < len; i++) {
         ret = usb_control_msg(dev->udev,
-                    usb_rcvctrlpipe(dev->udev, 0), (0x02),
-                    (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
-                    HZ);
-        if (ret < 1) {
-            pr_err("Read EDID byte %d failed err %x\n", i, ret);
+                    usb_rcvctrlpipe(dev->udev, 0), 0x02,
+                    (0x80 | (0x02 << 5)), i << 8, 0xA1,
+                    rbuf, 2, USB_CTRL_GET_TIMEOUT);
+        if (ret < 2) {
+            pr_err("Read EDID byte %d failed: %d\n", i, ret);
             i--;
             break;
         }
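
Two fixes share that hunk: a short control-message read (ret < 2) now counts as a failure instead of only ret < 1, and the timeout becomes USB_CTRL_GET_TIMEOUT rather than HZ, which is a tick count, not milliseconds, so the effective wait varied with the kernel configuration. A userspace sketch of the short-read check (illustrative names):

/* userspace sketch of the short-read handling */
#include <stdio.h>

#define EDID_WANT 2

/* stand-in for usb_control_msg(): returns bytes transferred, or negative */
static int control_read(unsigned char *buf, int want, int deliver)
{
    for (int i = 0; i < deliver && i < want; i++)
        buf[i] = 0x55;
    return deliver;
}

static int get_edid_byte(int deliver)
{
    unsigned char rbuf[EDID_WANT];
    int ret = control_read(rbuf, EDID_WANT, deliver);

    if (ret < EDID_WANT) {  /* was "ret < 1": a 1-byte read slipped through */
        fprintf(stderr, "Read EDID byte failed: %d\n", ret);
        return -1;
    }
    return rbuf[1];
}

int main(void)
{
    get_edid_byte(2);   /* full read: accepted */
    get_edid_byte(1);   /* short read: now rejected */
    return 0;
}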


@@ -362,7 +362,7 @@ static void afs_callback_updater(struct work_struct *work)
 {
     struct afs_server *server;
     struct afs_vnode *vnode, *xvnode;
-    time_t now;
+    time64_t now;
     long timeout;
     int ret;
 
@@ -370,7 +370,7 @@ static void afs_callback_updater(struct work_struct *work)
     _enter("");
 
-    now = get_seconds();
+    now = ktime_get_real_seconds();
 
     /* find the first vnode to update */
     spin_lock(&server->cb_lock);
@@ -424,7 +424,8 @@ static void afs_callback_updater(struct work_struct *work)
     /* and then reschedule */
     _debug("reschedule");
-    vnode->update_at = get_seconds() + afs_vnode_update_timeout;
+    vnode->update_at = ktime_get_real_seconds() +
+            afs_vnode_update_timeout;
 
     spin_lock(&server->cb_lock);


@@ -29,6 +29,7 @@ static int afs_readpages(struct file *filp, struct address_space *mapping,
 const struct file_operations afs_file_operations = {
     .open       = afs_open,
+    .flush      = afs_flush,
     .release    = afs_release,
     .llseek     = generic_file_llseek,
     .read_iter  = generic_file_read_iter,


@@ -105,7 +105,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
         vnode->vfs_inode.i_mode = mode;
     }
 
-    vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server;
+    vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client;
     vnode->vfs_inode.i_mtime    = vnode->vfs_inode.i_ctime;
     vnode->vfs_inode.i_atime    = vnode->vfs_inode.i_ctime;
     vnode->vfs_inode.i_version  = data_version;
@@ -139,7 +139,7 @@ static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode)
     vnode->cb_version   = ntohl(*bp++);
     vnode->cb_expiry    = ntohl(*bp++);
     vnode->cb_type      = ntohl(*bp++);
-    vnode->cb_expires   = vnode->cb_expiry + get_seconds();
+    vnode->cb_expires   = vnode->cb_expiry + ktime_get_real_seconds();
     *_bp = bp;
 }
 
@@ -703,8 +703,8 @@ int afs_fs_create(struct afs_server *server,
         memset(bp, 0, padsz);
         bp = (void *) bp + padsz;
     }
-    *bp++ = htonl(AFS_SET_MODE);
-    *bp++ = 0; /* mtime */
+    *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+    *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
     *bp++ = 0; /* owner */
     *bp++ = 0; /* group */
     *bp++ = htonl(mode & S_IALLUGO); /* unix mode */
@@ -981,8 +981,8 @@ int afs_fs_symlink(struct afs_server *server,
         memset(bp, 0, c_padsz);
         bp = (void *) bp + c_padsz;
     }
-    *bp++ = htonl(AFS_SET_MODE);
-    *bp++ = 0; /* mtime */
+    *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+    *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
     *bp++ = 0; /* owner */
     *bp++ = 0; /* group */
     *bp++ = htonl(S_IRWXUGO); /* unix mode */
@@ -1192,8 +1192,8 @@ static int afs_fs_store_data64(struct afs_server *server,
     *bp++ = htonl(vnode->fid.vnode);
     *bp++ = htonl(vnode->fid.unique);
 
-    *bp++ = 0; /* mask */
-    *bp++ = 0; /* mtime */
+    *bp++ = htonl(AFS_SET_MTIME); /* mask */
+    *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
     *bp++ = 0; /* owner */
     *bp++ = 0; /* group */
     *bp++ = 0; /* unix mode */
@@ -1225,7 +1225,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
     _enter(",%x,{%x:%u},,",
            key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
 
-    size = to - offset;
+    size = (loff_t)to - (loff_t)offset;
     if (first != last)
         size += (loff_t)(last - first) << PAGE_SHIFT;
     pos = (loff_t)first << PAGE_SHIFT;
@@ -1269,8 +1269,8 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
     *bp++ = htonl(vnode->fid.vnode);
     *bp++ = htonl(vnode->fid.unique);
 
-    *bp++ = 0; /* mask */
-    *bp++ = 0; /* mtime */
+    *bp++ = htonl(AFS_SET_MTIME); /* mask */
+    *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
     *bp++ = 0; /* owner */
     *bp++ = 0; /* group */
     *bp++ = 0; /* unix mode */


@@ -69,9 +69,9 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
     set_nlink(inode, vnode->status.nlink);
     inode->i_uid        = vnode->status.owner;
-    inode->i_gid        = GLOBAL_ROOT_GID;
+    inode->i_gid        = vnode->status.group;
     inode->i_size       = vnode->status.size;
-    inode->i_ctime.tv_sec   = vnode->status.mtime_server;
+    inode->i_ctime.tv_sec   = vnode->status.mtime_client;
     inode->i_ctime.tv_nsec  = 0;
     inode->i_atime      = inode->i_mtime = inode->i_ctime;
     inode->i_blocks     = 0;
@@ -244,12 +244,13 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
             vnode->cb_version = 0;
             vnode->cb_expiry = 0;
             vnode->cb_type = 0;
-            vnode->cb_expires = get_seconds();
+            vnode->cb_expires = ktime_get_real_seconds();
         } else {
             vnode->cb_version = cb->version;
             vnode->cb_expiry = cb->expiry;
             vnode->cb_type = cb->type;
-            vnode->cb_expires = vnode->cb_expiry + get_seconds();
+            vnode->cb_expires = vnode->cb_expiry +
+                ktime_get_real_seconds();
         }
     }
 
@@ -322,7 +323,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
         !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
         !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
         !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
-        if (vnode->cb_expires < get_seconds() + 10) {
+        if (vnode->cb_expires < ktime_get_real_seconds() + 10) {
             _debug("callback expired");
             set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
         } else {


@@ -11,6 +11,7 @@
 
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
 #include <linux/skbuff.h>
@@ -247,7 +248,7 @@ struct afs_cache_vhash {
  */
 struct afs_vlocation {
     atomic_t        usage;
-    time_t          time_of_death;  /* time at which put reduced usage to 0 */
+    time64_t        time_of_death;  /* time at which put reduced usage to 0 */
     struct list_head    link;       /* link in cell volume location list */
     struct list_head    grave;      /* link in master graveyard list */
    struct list_head    update;     /* link in master update list */
@@ -258,7 +259,7 @@ struct afs_vlocation {
     struct afs_cache_vlocation vldb;    /* volume information DB record */
     struct afs_volume   *vols[3];   /* volume access record pointer (index by type) */
     wait_queue_head_t   waitq;      /* status change waitqueue */
-    time_t          update_at;  /* time at which record should be updated */
+    time64_t        update_at;  /* time at which record should be updated */
     spinlock_t      lock;       /* access lock */
     afs_vlocation_state_t   state;      /* volume location state */
     unsigned short      upd_rej_cnt;    /* ENOMEDIUM count during update */
@@ -271,7 +272,7 @@
  */
 struct afs_server {
     atomic_t        usage;
-    time_t          time_of_death;  /* time at which put reduced usage to 0 */
+    time64_t        time_of_death;  /* time at which put reduced usage to 0 */
     struct in_addr      addr;       /* server address */
     struct afs_cell     *cell;      /* cell in which server resides */
     struct list_head    link;       /* link in cell's server list */
@@ -374,8 +375,8 @@ struct afs_vnode {
     struct rb_node      server_rb;  /* link in server->fs_vnodes */
     struct rb_node      cb_promise; /* link in server->cb_promises */
     struct work_struct  cb_broken_work; /* work to be done on callback break */
-    time_t          cb_expires; /* time at which callback expires */
-    time_t          cb_expires_at;  /* time used to order cb_promise */
+    time64_t        cb_expires; /* time at which callback expires */
+    time64_t        cb_expires_at;  /* time used to order cb_promise */
     unsigned        cb_version; /* callback version */
     unsigned        cb_expiry;  /* callback expiry time */
     afs_callback_type_t cb_type;    /* type of callback */
@@ -749,6 +750,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
 extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
 extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
 extern int afs_writeback_all(struct afs_vnode *);
+extern int afs_flush(struct file *, fl_owner_t);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
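
The time_t to time64_t migration matters on 32-bit builds, where time_t is 32 bits and overflows in January 2038, corrupting expiry and reaper ordering. A tiny userspace sketch of the failure mode:

/* userspace sketch of the 2038 wraparound */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* last second a signed 32-bit time_t can hold: 2038-01-19T03:14:07Z */
    int64_t wide = (int64_t)INT32_MAX + 600;    /* plus "at least 10 minutes" */
    int32_t narrow = (int32_t)wide;             /* wraps (implementation-defined) */

    printf("time64_t update_at: %lld\n", (long long)wide);
    printf("time_t   update_at: %d\n", (int)narrow);    /* negative: ordering breaks */
    return 0;
}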


@@ -340,17 +340,22 @@ int afs_permission(struct inode *inode, int mask)
     } else {
         if (!(access & AFS_ACE_LOOKUP))
             goto permission_denied;
+        if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR))
+            goto permission_denied;
         if (mask & (MAY_EXEC | MAY_READ)) {
             if (!(access & AFS_ACE_READ))
                 goto permission_denied;
+            if (!(inode->i_mode & S_IRUSR))
+                goto permission_denied;
         } else if (mask & MAY_WRITE) {
             if (!(access & AFS_ACE_WRITE))
                 goto permission_denied;
+            if (!(inode->i_mode & S_IWUSR))
+                goto permission_denied;
         }
     }
 
     key_put(key);
-    ret = generic_permission(inode, mask);
     _leave(" = %d", ret);
     return ret;


@@ -237,7 +237,7 @@ void afs_put_server(struct afs_server *server)
     spin_lock(&afs_server_graveyard_lock);
     if (atomic_read(&server->usage) == 0) {
         list_move_tail(&server->grave, &afs_server_graveyard);
-        server->time_of_death = get_seconds();
+        server->time_of_death = ktime_get_real_seconds();
         queue_delayed_work(afs_wq, &afs_server_reaper,
                    afs_server_timeout * HZ);
     }
@@ -272,9 +272,9 @@ static void afs_reap_server(struct work_struct *work)
     LIST_HEAD(corpses);
     struct afs_server *server;
     unsigned long delay, expiry;
-    time_t now;
+    time64_t now;
 
-    now = get_seconds();
+    now = ktime_get_real_seconds();
     spin_lock(&afs_server_graveyard_lock);
     while (!list_empty(&afs_server_graveyard)) {


@@ -340,7 +340,8 @@ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
     struct afs_vlocation *xvl;
 
     /* wait at least 10 minutes before updating... */
-    vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+    vl->update_at = ktime_get_real_seconds() +
+            afs_vlocation_update_timeout;
 
     spin_lock(&afs_vlocation_updates_lock);
@@ -506,7 +507,7 @@ void afs_put_vlocation(struct afs_vlocation *vl)
     if (atomic_read(&vl->usage) == 0) {
         _debug("buried");
         list_move_tail(&vl->grave, &afs_vlocation_graveyard);
-        vl->time_of_death = get_seconds();
+        vl->time_of_death = ktime_get_real_seconds();
         queue_delayed_work(afs_wq, &afs_vlocation_reap,
                    afs_vlocation_timeout * HZ);
@@ -543,11 +544,11 @@ static void afs_vlocation_reaper(struct work_struct *work)
     LIST_HEAD(corpses);
     struct afs_vlocation *vl;
     unsigned long delay, expiry;
-    time_t now;
+    time64_t now;
 
     _enter("");
-    now = get_seconds();
+    now = ktime_get_real_seconds();
     spin_lock(&afs_vlocation_graveyard_lock);
 
     while (!list_empty(&afs_vlocation_graveyard)) {
@@ -622,13 +623,13 @@ static void afs_vlocation_updater(struct work_struct *work)
 {
     struct afs_cache_vlocation vldb;
     struct afs_vlocation *vl, *xvl;
-    time_t now;
+    time64_t now;
     long timeout;
     int ret;
 
     _enter("");
 
-    now = get_seconds();
+    now = ktime_get_real_seconds();
 
     /* find a record to update */
     spin_lock(&afs_vlocation_updates_lock);
@@ -684,7 +685,8 @@ static void afs_vlocation_updater(struct work_struct *work)
     /* and then reschedule */
     _debug("reschedule");
-    vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+    vl->update_at = ktime_get_real_seconds() +
+            afs_vlocation_update_timeout;
 
     spin_lock(&afs_vlocation_updates_lock);


@@ -148,12 +148,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
         kfree(candidate);
         return -ENOMEM;
     }
-    *pagep = page;
-    /* page won't leak in error case: it eventually gets cleaned off LRU */
 
     if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
         ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page);
         if (ret < 0) {
+            unlock_page(page);
+            put_page(page);
             kfree(candidate);
             _leave(" = %d [prep]", ret);
             return ret;
@@ -161,6 +161,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
         SetPageUptodate(page);
     }
 
+    /* page won't leak in error case: it eventually gets cleaned off LRU */
+    *pagep = page;
+
 try_again:
     spin_lock(&vnode->writeback_lock);
@@ -296,10 +299,14 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error,
         ASSERTCMP(pv.nr, ==, count);
 
         for (loop = 0; loop < count; loop++) {
-            ClearPageUptodate(pv.pages[loop]);
+            struct page *page = pv.pages[loop];
+            ClearPageUptodate(page);
             if (error)
-                SetPageError(pv.pages[loop]);
-            end_page_writeback(pv.pages[loop]);
+                SetPageError(page);
+            if (PageWriteback(page))
+                end_page_writeback(page);
+            if (page->index >= first)
+                first = page->index + 1;
         }
 
         __pagevec_release(&pv);
@@ -503,6 +510,7 @@ static int afs_writepages_region(struct address_space *mapping,
         if (PageWriteback(page) || !PageDirty(page)) {
             unlock_page(page);
+            put_page(page);
             continue;
         }
@@ -739,6 +747,20 @@ out:
     return ret;
 }
 
+/*
+ * Flush out all outstanding writes on a file opened for writing when it is
+ * closed.
+ */
+int afs_flush(struct file *file, fl_owner_t id)
+{
+    _enter("");
+
+    if ((file->f_mode & FMODE_WRITE) == 0)
+        return 0;
+
+    return vfs_fsync(file, 0);
+}
+
 /*
  * notification that a previously read-only page is about to become writable
  * - if it returns an error, the caller will deliver a bus error signal
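
Wiring afs_flush() into .flush means every close(2) of a write-capable descriptor pushes outstanding writes back to the server via vfs_fsync(). A userspace sketch of the same contract, with fsync() standing in for vfs_fsync():

/* userspace sketch, not the kernel code */
#include <fcntl.h>
#include <unistd.h>

static int my_flush(int fd, int accmode)
{
    if ((accmode & O_ACCMODE) == O_RDONLY)
        return 0;           /* nothing to write back */
    return fsync(fd);       /* stand-in for vfs_fsync(file, 0) */
}

int main(void)
{
    int fd = open("/tmp/afs_flush_demo", O_CREAT | O_WRONLY, 0600);

    if (fd < 0)
        return 1;
    if (write(fd, "x", 1) != 1)
        return 1;
    my_flush(fd, O_WRONLY); /* data is durable before the descriptor goes away */
    return close(fd);
}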


@@ -174,7 +174,6 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
     mutex_unlock(&sbi->wq_mutex);
 
-    if (autofs4_write(sbi, pipe, &pkt, pktsz))
     switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
     case 0:
         break;


@@ -6735,6 +6735,20 @@ static noinline int uncompress_inline(struct btrfs_path *path,
     max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
     ret = btrfs_decompress(compress_type, tmp, page,
                    extent_offset, inline_size, max_size);
+
+    /*
+     * decompression code contains a memset to fill in any space between the end
+     * of the uncompressed data and the end of max_size in case the decompressed
+     * data ends up shorter than ram_bytes.  That doesn't cover the hole between
+     * the end of an inline extent and the beginning of the next block, so we
+     * cover that region here.
+     */
+
+    if (max_size + pg_offset < PAGE_SIZE) {
+        char *map = kmap(page);
+        memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
+        kunmap(page);
+    }
     kfree(tmp);
     return ret;
 }
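
A userspace sketch of the zero-fill in that hunk: after the decompressed bytes are placed at pg_offset, everything from the end of the data to the end of the page must be cleared so stale page contents cannot leak into the hole (PAGE_SIZE and the names are illustrative):

/* userspace sketch of the tail zeroing */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

int main(void)
{
    static char page[PAGE_SIZE];
    size_t pg_offset = 0, max_size = 100;

    memset(page, 0xff, sizeof(page));   /* stand-in for stale page contents */
    memset(page, 'd', max_size);        /* stand-in for decompressed data */

    if (max_size + pg_offset < PAGE_SIZE)   /* the hunk's check */
        memset(page + pg_offset + max_size, 0,
               PAGE_SIZE - max_size - pg_offset);

    printf("byte after data: %d\n", page[max_size]);    /* 0, not stale 0xff */
    return 0;
}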


@@ -1400,6 +1400,29 @@ static int __close_session(struct ceph_mds_client *mdsc,
     return request_close_session(mdsc, session);
 }
 
+static bool drop_negative_children(struct dentry *dentry)
+{
+    struct dentry *child;
+    bool all_negative = true;
+
+    if (!d_is_dir(dentry))
+        goto out;
+
+    spin_lock(&dentry->d_lock);
+    list_for_each_entry(child, &dentry->d_subdirs, d_child) {
+        if (d_really_is_positive(child)) {
+            all_negative = false;
+            break;
+        }
+    }
+    spin_unlock(&dentry->d_lock);
+
+    if (all_negative)
+        shrink_dcache_parent(dentry);
+out:
+    return all_negative;
+}
+
 /*
  * Trim old(er) caps.
  *
@@ -1445,16 +1468,27 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
     if ((used | wanted) & ~oissued & mine)
         goto out;   /* we need these caps */
 
-    session->s_trim_caps--;
     if (oissued) {
         /* we aren't the only cap.. just remove us */
         __ceph_remove_cap(cap, true);
+        session->s_trim_caps--;
     } else {
+        struct dentry *dentry;
         /* try dropping referring dentries */
         spin_unlock(&ci->i_ceph_lock);
-        d_prune_aliases(inode);
-        dout("trim_caps_cb %p cap %p pruned, count now %d\n",
-             inode, cap, atomic_read(&inode->i_count));
+        dentry = d_find_any_alias(inode);
+        if (dentry && drop_negative_children(dentry)) {
+            int count;
+            dput(dentry);
+            d_prune_aliases(inode);
+            count = atomic_read(&inode->i_count);
+            if (count == 1)
+                session->s_trim_caps--;
+            dout("trim_caps_cb %p cap %p pruned, count now %d\n",
+                 inode, cap, count);
+        } else {
+            dput(dentry);
+        }
+
         return 0;
     }


@@ -4738,6 +4738,7 @@ retry:
                         EXT4_INODE_EOFBLOCKS);
         }
         ext4_mark_inode_dirty(handle, inode);
+        ext4_update_inode_fsync_trans(handle, inode, 1);
         ret2 = ext4_journal_stop(handle);
         if (ret2)
             break;


@@ -1403,6 +1403,10 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
                    "falling back\n"));
     }
     nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+    if (!nblocks) {
+        ret = NULL;
+        goto cleanup_and_exit;
+    }
     start = EXT4_I(dir)->i_dir_start_lookup;
     if (start >= nblocks)
         start = 0;
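
The guard covers a corrupted directory whose i_size is smaller than one block: nblocks computes to zero and the lookup must bail out rather than walk an empty block range. A userspace sketch of the arithmetic (illustrative constants):

/* userspace sketch of the nblocks guard */
#include <stdio.h>

#define BLOCK_SIZE_BITS 12  /* 4 KiB blocks */

static const char *find_entry(unsigned long long i_size)
{
    unsigned long nblocks = i_size >> BLOCK_SIZE_BITS;

    if (!nblocks)           /* the added check */
        return NULL;
    return "scan blocks";
}

int main(void)
{
    printf("%s\n", find_entry(128)  ? "scan" : "truncated dir, bail");
    printf("%s\n", find_entry(8192) ? "scan" : "truncated dir, bail");
    return 0;
}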

Some files were not shown because too many files have changed in this diff.