Merge android-4.4.129 (b1c4836) into msm-4.4

* refs/heads/tmp-b1c4836
  Linux 4.4.129
  writeback: safer lock nesting
  fanotify: fix logic of events on child
  ext4: bugfix for mmaped pages in mpage_release_unused_pages()
  mm/filemap.c: fix NULL pointer in page_cache_tree_insert()
  mm: allow GFP_{FS,IO} for page_cache_read page cache allocation
  autofs: mount point create should honour passed in mode
  Don't leak MNT_INTERNAL away from internal mounts
  rpc_pipefs: fix double-dput()
  hypfs_kill_super(): deal with failed allocations
  jffs2_kill_sb(): deal with failed allocations
  powerpc/lib: Fix off-by-one in alternate feature patching
  powerpc/eeh: Fix enabling bridge MMIO windows
  MIPS: memset.S: Fix clobber of v1 in last_fixup
  MIPS: memset.S: Fix return of __clear_user from Lpartial_fixup
  MIPS: memset.S: EVA & fault support for small_memset
  MIPS: uaccess: Add micromips clobbers to bzero invocation
  HID: hidraw: Fix crash on HIDIOCGFEATURE with a destroyed device
  ALSA: hda - New VIA controller support no-snoop path
  ALSA: rawmidi: Fix missing input substream checks in compat ioctls
  ALSA: line6: Use correct endpoint type for midi output
  ext4: fix deadlock between inline_data and ext4_expand_extra_isize_ea()
  ext4: fix crashes in dioread_nolock mode
  drm/radeon: Fix PCIe lane width calculation
  ext4: don't allow r/w mounts if metadata blocks overlap the superblock
  vfio/pci: Virtualize Maximum Read Request Size
  vfio/pci: Virtualize Maximum Payload Size
  vfio-pci: Virtualize PCIe & AF FLR
  ALSA: pcm: Fix endless loop for XRUN recovery in OSS emulation
  ALSA: pcm: Fix mutex unbalance in OSS emulation ioctls
  ALSA: pcm: Return -EBUSY for OSS ioctls changing busy streams
  ALSA: pcm: Avoid potential races between OSS ioctls and read/write
  ALSA: pcm: Use ERESTARTSYS instead of EINTR in OSS emulation
  ALSA: oss: consolidate kmalloc/memset 0 call to kzalloc
  watchdog: f71808e_wdt: Fix WD_EN register read
  thermal: imx: Fix race condition in imx_thermal_probe()
  clk: bcm2835: De-assert/assert PLL reset signal when appropriate
  clk: mvebu: armada-38x: add support for missing clocks
  clk: mvebu: armada-38x: add support for 1866MHz variants
  mmc: jz4740: Fix race condition in IRQ mask update
  iommu/vt-d: Fix a potential memory leak
  um: Use POSIX ucontext_t instead of struct ucontext
  dmaengine: at_xdmac: fix rare residue corruption
  IB/srp: Fix completion vector assignment algorithm
  IB/srp: Fix srp_abort()
  ALSA: pcm: Fix UAF at PCM release via PCM timer access
  RDMA/ucma: Don't allow setting RDMA_OPTION_IB_PATH without an RDMA device
  ext4: fail ext4_iget for root directory if unallocated
  ext4: don't update checksum of new initialized bitmaps
  jbd2: if the journal is aborted then don't allow update of the log tail
  random: use a tighter cap in credit_entropy_bits_safe()
  thunderbolt: Resume control channel after hibernation image is created
  ASoC: ssm2602: Replace reg_default_raw with reg_default
  HID: core: Fix size as type u32
  HID: Fix hid_report_len usage
  powerpc/powernv: Fix OPAL NVRAM driver OPAL_BUSY loops
  powerpc/powernv: define a standard delay for OPAL_BUSY type retry loops
  powerpc/64: Fix smp_wmb barrier definition to use lwsync consistently
  powerpc/powernv: Handle unknown OPAL errors in opal_nvram_write()
  HID: i2c-hid: fix size check and type usage
  usb: dwc3: pci: Properly cleanup resource
  USB: fix USB3 devices behind USB3 hubs not resuming at hibernate thaw
  ACPI / hotplug / PCI: Check presence of slot itself in get_slot_status()
  ACPI / video: Add quirk to force acpi-video backlight on Samsung 670Z5E
  regmap: Fix reversed bounds check in regmap_raw_write()
  xen-netfront: Fix hang on device removal
  ARM: dts: at91: sama5d4: fix pinctrl compatible string
  ARM: dts: at91: at91sam9g25: fix mux-mask pinctrl property
  usb: musb: gadget: misplaced out of bounds check
  mm, slab: reschedule cache_reap() on the same CPU
  ipc/shm: fix use-after-free of shm file via remap_file_pages()
  resource: fix integer overflow at reallocation
  fs/reiserfs/journal.c: add missing reiserfs_warning() arg
  ubi: Reject MLC NAND
  ubi: Fix error for write access
  ubi: fastmap: Don't flush fastmap work on detach
  ubifs: Check ubifs_wbuf_sync() return code
  tty: make n_tty_read() always abort if hangup is in progress
  x86/hweight: Don't clobber %rdi
  x86/hweight: Get rid of the special calling convention
  lan78xx: Correctly indicate invalid OTP
  slip: Check if rstate is initialized before uncompressing
  cdc_ether: flag the Cinterion AHS8 modem by gemalto as WWAN
  hwmon: (ina2xx) Fix access to uninitialized mutex
  rtl8187: Fix NULL pointer dereference in priv->conf_mutex
  getname_kernel() needs to make sure that ->name != ->iname in long case
  s390/ipl: ensure loadparm valid flag is set
  s390/qdio: don't merge ERROR output buffers
  s390/qdio: don't retry EQBS after CCQ 96
  block/loop: fix deadlock after loop_set_status
  Revert "perf tests: Decompress kernel module before objdump"
  radeon: hide pointless #warning when compile testing
  perf intel-pt: Fix timestamp following overflow
  perf intel-pt: Fix error recovery from missing TIP packet
  perf intel-pt: Fix sync_switch
  perf intel-pt: Fix overlap detection to identify consecutive buffers correctly
  parisc: Fix out of array access in match_pci_device()
  media: v4l2-compat-ioctl32: don't oops on overlay
  f2fs: check cap_resource only for data blocks
  Revert "f2fs: introduce f2fs_set_page_dirty_nobuffer"
  f2fs: clear PageError on writepage
  UPSTREAM: timer: Export destroy_hrtimer_on_stack()
  BACKPORT: dm verity: add 'check_at_most_once' option to only validate hashes once
  f2fs: call unlock_new_inode() before d_instantiate()
  f2fs: refactor read path to allow multiple postprocessing steps
  fscrypt: allow synchronous bio decryption

Change-Id: I45f4ac10734d92023b53118d83dcd6c83974a283
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Commit: ee76c85f71 (Srinivasarao P, 2018-04-24 19:07:31 +05:30)
114 changed files with 1103 additions and 544 deletions


@ -109,6 +109,17 @@ fec_start <offset>
This is the offset, in <data_block_size> blocks, from the start of the
FEC device to the beginning of the encoding data.
check_at_most_once
Verify data blocks only the first time they are read from the data device,
rather than every time. This reduces the overhead of dm-verity so that it
can be used on systems that are memory and/or CPU constrained. However, it
provides a reduced level of security because only offline tampering of the
data device's content will be detected, not online tampering.
Hash blocks are still verified each time they are read from the hash device,
since verification of hash blocks is less performance critical than data
blocks, and a hash block will not be verified any more after all the data
blocks it covers have been verified anyway.
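
The option is passed in the optional-arguments section of the target line.
A sketch only: the device paths, sizes, root hash and salt below are
placeholders, not values from a real device:

    dmsetup create vroot --readonly --table \
      "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 "\
      "<root hash> <salt> 1 check_at_most_once"
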
Theory of operation
===================


@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 128
SUBLEVEL = 129
EXTRAVERSION =
NAME = Blurry Fish Butt


@ -21,7 +21,7 @@
atmel,mux-mask = <
/* A B C */
0xffffffff 0xffe0399f 0xc000001c /* pioA */
0x0007ffff 0x8000fe3f 0x00000000 /* pioB */
0x0007ffff 0x00047e3f 0x00000000 /* pioB */
0x80000000 0x07c0ffff 0xb83fffff /* pioC */
0x003fffff 0x003f8000 0x00000000 /* pioD */
>;


@ -1354,7 +1354,7 @@
pinctrl@fc06a000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus";
ranges = <0xfc068000 0xfc068000 0x100
0xfc06a000 0xfc06a000 0x4000>;
/* WARNING: revisit as pin spec has changed */


@ -1238,6 +1238,13 @@ __clear_user(void __user *addr, __kernel_size_t size)
{
__kernel_size_t res;
#ifdef CONFIG_CPU_MICROMIPS
/* micromips memset / bzero also clobbers t7 & t8 */
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
#else
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */
if (eva_kernel_access()) {
__asm__ __volatile__(
"move\t$4, %1\n\t"
@ -1247,7 +1254,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
: bzero_clobbers);
} else {
might_fault();
__asm__ __volatile__(
@ -1258,7 +1265,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
: bzero_clobbers);
}
return res;


@ -218,7 +218,7 @@
1: PTR_ADDIU a0, 1 /* fill bytewise */
R10KCBARRIER(0(ra))
bne t1, a0, 1b
sb a1, -1(a0)
EX(sb, a1, -1(a0), .Lsmall_fixup\@)
2: jr ra /* done */
move a2, zero
@ -249,13 +249,18 @@
PTR_L t0, TI_TASK($28)
andi a2, STORMASK
LONG_L t0, THREAD_BUADDR(t0)
LONG_ADDU a2, t1
LONG_ADDU a2, a0
jr ra
LONG_SUBU a2, t0
.Llast_fixup\@:
jr ra
andi v1, a2, STORMASK
nop
.Lsmall_fixup\@:
PTR_SUBU a2, t1, a0
jr ra
PTR_ADDIU a2, 1
.endm


@ -648,6 +648,10 @@ static int match_pci_device(struct device *dev, int index,
(modpath->mod == PCI_FUNC(devfn)));
}
/* index might be out of bounds for bc[] */
if (index >= 6)
return 0;
id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
return (modpath->bc[index] == id);
}


@ -36,7 +36,8 @@
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
#ifdef __SUBARCH_HAS_LWSYNC
/* The sub-arch has lwsync */
#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
# define SMPWMB LWSYNC
#else
# define SMPWMB eieio


@ -21,6 +21,9 @@
/* We calculate number of sg entries based on PAGE_SIZE */
#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
#define OPAL_BUSY_DELAY_MS 10
/* /sys/firmware/opal */
extern struct kobject *opal_kobj;


@ -5,10 +5,6 @@
#include <linux/stringify.h>
#include <asm/feature-fixups.h>
#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
#define __SUBARCH_HAS_LWSYNC
#endif
#ifndef __ASSEMBLY__
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,


@ -788,7 +788,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
/* PCI Command: 0x4 */
eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
/* Check the PCIe link is ready */
eeh_bridge_check_link(edev);


@ -53,7 +53,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
unsigned int *target = (unsigned int *)branch_target(src);
/* Branch within the section doesn't need translating */
if (target < alt_start || target >= alt_end) {
if (target < alt_start || target > alt_end) {
instr = translate_branch(dest, src);
if (!instr)
return 1;


@ -11,6 +11,7 @@
#define DEBUG
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
@ -56,9 +57,17 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_write_nvram(__pa(buf), count, off);
if (rc == OPAL_BUSY_EVENT)
if (rc == OPAL_BUSY_EVENT) {
msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
} else if (rc == OPAL_BUSY) {
msleep(OPAL_BUSY_DELAY_MS);
}
}
if (rc)
return -EIO;
*index += count;
return count;
}
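
For reference, a standalone sketch of the retry policy these OPAL patches
converge on: sleep OPAL_BUSY_DELAY_MS between attempts, and keep polling
events when the firmware returns OPAL_BUSY_EVENT. The return values and
firmware calls below are stand-ins stubbed so the sketch runs, not the
real OPAL ABI:

    #include <unistd.h>

    #define OPAL_BUSY_DELAY_MS 10
    #define OPAL_SUCCESS        0
    #define OPAL_BUSY          -2   /* illustrative values, not the firmware's */
    #define OPAL_BUSY_EVENT   -12

    static int attempts;
    static int firmware_write(void)     /* stub: busy twice, then succeeds */
    {
        return ++attempts < 3 ? OPAL_BUSY_EVENT : OPAL_SUCCESS;
    }
    static void poll_events(void) { }   /* stub for opal_poll_events() */

    static int write_with_retry(void)
    {
        int rc = OPAL_BUSY;

        while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
            rc = firmware_write();
            if (rc == OPAL_BUSY_EVENT) {
                usleep(OPAL_BUSY_DELAY_MS * 1000);
                poll_events();          /* BUSY_EVENT also needs event polling */
            } else if (rc == OPAL_BUSY) {
                usleep(OPAL_BUSY_DELAY_MS * 1000);
            }
        }
        return rc == OPAL_SUCCESS ? 0 : -1;
    }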


@ -318,7 +318,7 @@ static void hypfs_kill_super(struct super_block *sb)
if (sb->s_root)
hypfs_delete_tree(sb->s_root);
if (sb_info->update_file)
if (sb_info && sb_info->update_file)
hypfs_remove(sb_info->update_file);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;


@ -798,6 +798,7 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
/* copy and convert to ebcdic */
memcpy(ipb->hdr.loadparm, buf, lp_len);
ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
return len;
}


@ -140,7 +140,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
static void hard_handler(int sig, siginfo_t *si, void *p)
{
struct ucontext *uc = p;
ucontext_t *uc = p;
mcontext_t *mc = &uc->uc_mcontext;
unsigned long pending = 1UL << sig;


@ -10,7 +10,7 @@
void __attribute__ ((__section__ (".__syscall_stub")))
stub_segv_handler(int sig, siginfo_t *info, void *p)
{
struct ucontext *uc = p;
ucontext_t *uc = p;
GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
&uc->uc_mcontext);


@ -205,6 +205,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
"3570R/370R/470R/450R/510R/4450RV"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */
.callback = video_detect_force_video,
.ident = "SAMSUNG 670Z5E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
.callback = video_detect_force_video,


@ -1582,7 +1582,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (map->max_raw_write && map->max_raw_write > val_len)
if (map->max_raw_write && map->max_raw_write < val_len)
return -E2BIG;
map->lock(map->lock_arg);
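
The one-character change above flips a reversed bounds check: the intent
is to reject writes larger than the device's raw-write limit, while the
old '>' rejected every write that fit. The corrected shape, as a
standalone sketch (names are illustrative, not the regmap API):

    #include <errno.h>
    #include <stddef.h>

    /* A max_raw_write of 0 means "no limit"; otherwise oversized writes fail. */
    static int check_raw_write_len(size_t max_raw_write, size_t val_len)
    {
        if (max_raw_write && max_raw_write < val_len)
            return -E2BIG;
        return 0;
    }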


@ -1121,11 +1121,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (info->lo_encrypt_type) {
unsigned int type = info->lo_encrypt_type;
if (type >= MAX_LO_CRYPT)
return -EINVAL;
if (type >= MAX_LO_CRYPT) {
err = -EINVAL;
goto exit;
}
xfer = xfer_funcs[type];
if (xfer == NULL)
return -EINVAL;
if (xfer == NULL) {
err = -EINVAL;
goto exit;
}
} else
xfer = NULL;
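
The two early returns above skipped loop_set_status()'s common exit
label, so state acquired earlier in the function was never released and
later loop ioctls could deadlock. The fix routes both errors through
'goto exit'; the pattern, as a minimal userspace sketch with a pthread
mutex standing in for the held state:

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t ctl_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int set_status(unsigned int type, unsigned int max_type)
    {
        int err = 0;

        pthread_mutex_lock(&ctl_mutex);
        if (type >= max_type) {
            err = -EINVAL;
            goto exit;          /* a bare 'return' here would leak the lock */
        }
        /* ... validate the transfer function, update state ... */
    exit:
        pthread_mutex_unlock(&ctl_mutex);
        return err;
    }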


@ -724,7 +724,7 @@ retry:
static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
const int nbits_max = r->poolinfo->poolwords * 32;
if (nbits < 0)
return -EINVAL;
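
The old cap was a constant derived from the integer width; the new one
bounds the credit by what the pool can actually hold, at 32 bits per pool
word. As a standalone sketch with the clamp made explicit (upstream uses
min(); the struct here is illustrative, only the field name mirrors the
patch):

    #include <errno.h>

    struct poolinfo { int poolwords; };     /* pool size in 32-bit words */

    static int clamp_entropy_credit(const struct poolinfo *pi, int nbits)
    {
        const int nbits_max = pi->poolwords * 32;   /* pool capacity in bits */

        if (nbits < 0)
            return -EINVAL;
        return nbits > nbits_max ? nbits_max : nbits;
    }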


@ -891,9 +891,7 @@ static void bcm2835_pll_off(struct clk_hw *hw)
const struct bcm2835_pll_data *data = pll->data;
spin_lock(&cprman->regs_lock);
cprman_write(cprman, data->cm_ctrl_reg,
cprman_read(cprman, data->cm_ctrl_reg) |
CM_PLL_ANARST);
cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
cprman_write(cprman, data->a2w_ctrl_reg,
cprman_read(cprman, data->a2w_ctrl_reg) |
A2W_PLL_CTRL_PWRDN);
@ -929,6 +927,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
cpu_relax();
}
cprman_write(cprman, data->a2w_ctrl_reg,
cprman_read(cprman, data->a2w_ctrl_reg) |
A2W_PLL_CTRL_PRST_DISABLE);
return 0;
}


@ -46,10 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
}
static const u32 armada_38x_cpu_frequencies[] __initconst = {
0, 0, 0, 0,
1066 * 1000 * 1000, 0, 0, 0,
666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0,
1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
1332 * 1000 * 1000, 0, 0, 0,
1600 * 1000 * 1000,
1600 * 1000 * 1000, 0, 0, 0,
1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
};
static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
@ -75,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
};
static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {1, 2}, {0, 1},
{1, 2}, {0, 1}, {1, 2}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {1, 2},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
@ -90,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {7, 15},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},


@ -1473,10 +1473,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
rmb();
initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
rmb();
cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
rmb();
initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
rmb();
cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
rmb();


@ -238,9 +238,10 @@ int radeon_bo_create(struct radeon_device *rdev,
* may be slow
* See https://bugs.freedesktop.org/show_bug.cgi?id=88758
*/
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
thanks to write-combining
#endif
if (bo->flags & RADEON_GEM_GTT_WC)
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "


@ -5964,9 +5964,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
{
u32 lane_width;
u32 new_lane_width =
(radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
u32 current_lane_width =
(radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
if (new_lane_width != current_lane_width) {
radeon_set_pcie_lanes(rdev, new_lane_width);


@ -1331,7 +1331,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
* of implement() working on 8 byte chunks
*/
int len = hid_report_len(report) + 7;
u32 len = hid_report_len(report) + 7;
return kmalloc(len, flags);
}
@ -1396,7 +1396,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
{
char *buf;
int ret;
int len;
u32 len;
buf = hid_alloc_report_buf(report, GFP_KERNEL);
if (!buf)
@ -1422,14 +1422,14 @@ out:
}
EXPORT_SYMBOL_GPL(__hid_request);
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt)
{
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
struct hid_driver *hdrv;
unsigned int a;
int rsize, csize = size;
u32 rsize, csize = size;
u8 *cdata = data;
int ret = 0;
@ -1487,7 +1487,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
*
* This is data entry for lower layers.
*/
int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
{
struct hid_report_enum *report_enum;
struct hid_driver *hdrv;


@ -1258,7 +1258,8 @@ static void hidinput_led_worker(struct work_struct *work)
led_work);
struct hid_field *field;
struct hid_report *report;
int len, ret;
int ret;
u32 len;
__u8 *buf;
field = hidinput_get_led_field(hid);


@ -314,7 +314,8 @@ static struct attribute_group mt_attribute_group = {
static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
{
struct mt_device *td = hid_get_drvdata(hdev);
int ret, size = hid_report_len(report);
int ret;
u32 size = hid_report_len(report);
u8 *buf;
/*
@ -919,7 +920,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
struct hid_report_enum *re;
struct mt_class *cls = &td->mtclass;
char *buf;
int report_len;
u32 report_len;
if (td->inputmode < 0)
return;


@ -110,8 +110,8 @@ struct rmi_data {
u8 *writeReport;
u8 *readReport;
int input_report_size;
int output_report_size;
u32 input_report_size;
u32 output_report_size;
unsigned long flags;


@ -197,6 +197,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
int ret = 0, len;
unsigned char report_number;
if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
ret = -ENODEV;
goto out;
}
dev = hidraw_table[minor]->hid;
if (!dev->ll_driver->raw_request) {


@ -137,10 +137,10 @@ struct i2c_hid {
* register of the HID
* descriptor. */
unsigned int bufsize; /* i2c buffer size */
char *inbuf; /* Input buffer */
char *rawbuf; /* Raw Input buffer */
char *cmdbuf; /* Command buffer */
char *argsbuf; /* Command arguments buffer */
u8 *inbuf; /* Input buffer */
u8 *rawbuf; /* Raw Input buffer */
u8 *cmdbuf; /* Command buffer */
u8 *argsbuf; /* Command arguments buffer */
unsigned long flags; /* device flags */
@ -387,7 +387,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
static void i2c_hid_get_input(struct i2c_hid *ihid)
{
int ret, ret_size;
int ret;
u32 ret_size;
int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
if (size > ihid->bufsize)
@ -412,7 +413,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
return;
}
if (ret_size > size) {
if ((ret_size > size) || (ret_size <= 2)) {
dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
__func__, size, ret_size);
return;


@ -447,6 +447,7 @@ static int ina2xx_probe(struct i2c_client *client,
/* set the device type */
data->config = &ina2xx_config[id->driver_data];
mutex_init(&data->config_lock);
if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
@ -473,8 +474,6 @@ static int ina2xx_probe(struct i2c_client *client,
return -ENODEV;
}
mutex_init(&data->config_lock);
data->groups[group++] = &ina2xx_group;
if (id->driver_data == ina226)
data->groups[group++] = &ina226_group;


@ -1230,6 +1230,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
if (!optlen)
return -EINVAL;
if (!ctx->cm_id->device)
return -EINVAL;
memset(&sa_path, 0, sizeof(sa_path));
ib_sa_unpack_path(path_data->path_rec, &sa_path);


@ -2581,9 +2581,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
ret = FAST_IO_FAIL;
else
ret = FAILED;
srp_free_req(ch, req, scmnd, 0);
scmnd->result = DID_ABORT << 16;
scmnd->scsi_done(scmnd);
if (ret == SUCCESS) {
srp_free_req(ch, req, scmnd, 0);
scmnd->result = DID_ABORT << 16;
scmnd->scsi_done(scmnd);
}
return ret;
}
@ -3309,12 +3311,10 @@ static ssize_t srp_create_target(struct device *dev,
num_online_nodes());
const int ch_end = ((node_idx + 1) * target->ch_count /
num_online_nodes());
const int cv_start = (node_idx * ibdev->num_comp_vectors /
num_online_nodes() + target->comp_vector)
% ibdev->num_comp_vectors;
const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
num_online_nodes() + target->comp_vector)
% ibdev->num_comp_vectors;
const int cv_start = node_idx * ibdev->num_comp_vectors /
num_online_nodes();
const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
num_online_nodes();
int cpu_idx = 0;
for_each_online_cpu(cpu) {
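
The replaced formula offset each node's range by target->comp_vector and
reduced it modulo num_comp_vectors, which could wrap so that cv_end ends
up below cv_start and some channels get no completion vectors. The
corrected partitioning, as a standalone sketch:

    /* Give node i the half-open slice [start, end) of num_vectors completion
     * vectors; the slices tile the whole range without wrapping. */
    static void comp_vector_range(int node_idx, int num_nodes, int num_vectors,
                                  int *start, int *end)
    {
        *start = node_idx * num_vectors / num_nodes;
        *end = (node_idx + 1) * num_vectors / num_nodes;
    }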


@ -389,6 +389,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
pasid_max - 1, GFP_KERNEL);
if (ret < 0) {
kfree(svm);
kfree(sdev);
goto out;
}
svm->pasid = ret;


@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/vmalloc.h>
#define DM_MSG_PREFIX "verity"
@ -32,6 +33,7 @@
#define DM_VERITY_OPT_LOGGING "ignore_corruption"
#define DM_VERITY_OPT_RESTART "restart_on_corruption"
#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC)
@ -398,6 +400,18 @@ static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
return 0;
}
/*
* Moves the bio iter one data block forward.
*/
static inline void verity_bv_skip_block(struct dm_verity *v,
struct dm_verity_io *io,
struct bvec_iter *iter)
{
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
bio_advance_iter(bio, iter, 1 << v->data_dev_block_bits);
}
/*
* Verify one "dm_verity_io" structure.
*/
@ -410,9 +424,16 @@ static int verity_verify_io(struct dm_verity_io *io)
for (b = 0; b < io->n_blocks; b++) {
int r;
sector_t cur_block = io->block + b;
struct shash_desc *desc = verity_io_hash_desc(v, io);
r = verity_hash_for_block(v, io, io->block + b,
if (v->validated_blocks &&
likely(test_bit(cur_block, v->validated_blocks))) {
verity_bv_skip_block(v, io, &io->iter);
continue;
}
r = verity_hash_for_block(v, io, cur_block,
verity_io_want_digest(v, io),
&is_zero);
if (unlikely(r < 0))
@ -445,13 +466,16 @@ static int verity_verify_io(struct dm_verity_io *io)
return r;
if (likely(memcmp(verity_io_real_digest(v, io),
verity_io_want_digest(v, io), v->digest_size) == 0))
verity_io_want_digest(v, io), v->digest_size) == 0)) {
if (v->validated_blocks)
set_bit(cur_block, v->validated_blocks);
continue;
}
else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
io->block + b, NULL, &start) == 0)
cur_block, NULL, &start) == 0)
continue;
else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
io->block + b))
cur_block))
return -EIO;
}
@ -645,6 +669,8 @@ void verity_status(struct dm_target *ti, status_type_t type,
args += DM_VERITY_OPTS_FEC;
if (v->zero_digest)
args++;
if (v->validated_blocks)
args++;
if (!args)
return;
DMEMIT(" %u", args);
@ -663,6 +689,8 @@ void verity_status(struct dm_target *ti, status_type_t type,
}
if (v->zero_digest)
DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
if (v->validated_blocks)
DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
sz = verity_fec_status_table(v, sz, result, maxlen);
break;
}
@ -716,6 +744,7 @@ void verity_dtr(struct dm_target *ti)
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
vfree(v->validated_blocks);
kfree(v->salt);
kfree(v->root_digest);
kfree(v->zero_digest);
@ -737,6 +766,26 @@ void verity_dtr(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(verity_dtr);
static int verity_alloc_most_once(struct dm_verity *v)
{
struct dm_target *ti = v->ti;
/* the bitset can only handle INT_MAX blocks */
if (v->data_blocks > INT_MAX) {
ti->error = "device too large to use check_at_most_once";
return -E2BIG;
}
v->validated_blocks = vzalloc(BITS_TO_LONGS(v->data_blocks) *
sizeof(unsigned long));
if (!v->validated_blocks) {
ti->error = "failed to allocate bitset for check_at_most_once";
return -ENOMEM;
}
return 0;
}
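
verity_alloc_most_once() above allocates one bit per data block, which is
why data_blocks is capped at INT_MAX first. The same bookkeeping as a
standalone userspace sketch, with vzalloc and the kernel bitops replaced
by libc equivalents:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdlib.h>

    #define BITS_PER_LONG   (CHAR_BIT * sizeof(unsigned long))

    struct verify_once {
        unsigned long *bits;    /* one bit per data block */
    };

    static int verify_once_init(struct verify_once *v, unsigned long nblocks)
    {
        size_t nlongs = (nblocks + BITS_PER_LONG - 1) / BITS_PER_LONG;

        v->bits = calloc(nlongs, sizeof(unsigned long));
        return v->bits ? 0 : -1;
    }

    static bool already_verified(const struct verify_once *v, unsigned long blk)
    {
        return v->bits[blk / BITS_PER_LONG] & (1UL << (blk % BITS_PER_LONG));
    }

    static void mark_verified(struct verify_once *v, unsigned long blk)
    {
        v->bits[blk / BITS_PER_LONG] |= 1UL << (blk % BITS_PER_LONG);
    }
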
static int verity_alloc_zero_digest(struct dm_verity *v)
{
int r = -ENOMEM;
@ -806,6 +855,12 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v)
}
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
r = verity_alloc_most_once(v);
if (r)
return r;
continue;
} else if (verity_is_fec_opt_arg(arg_name)) {
r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
if (r)
@ -1074,7 +1129,7 @@ EXPORT_SYMBOL_GPL(verity_ctr);
static struct target_type verity_target = {
.name = "verity",
.version = {1, 3, 0},
.version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,


@ -63,6 +63,7 @@ struct dm_verity {
sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
struct dm_verity_fec *fec; /* forward error correction */
unsigned long *validated_blocks; /* bitset blocks validated */
};
struct dm_verity_io {


@ -101,7 +101,7 @@ static int get_v4l2_window32(struct v4l2_window __user *kp,
static int put_v4l2_window32(struct v4l2_window __user *kp,
struct v4l2_window32 __user *up)
{
struct v4l2_clip __user *kclips = kp->clips;
struct v4l2_clip __user *kclips;
struct v4l2_clip32 __user *uclips;
compat_caddr_t p;
u32 clipcount;
@ -116,6 +116,8 @@ static int put_v4l2_window32(struct v4l2_window __user *kp,
if (!clipcount)
return 0;
if (get_user(kclips, &kp->clips))
return -EFAULT;
if (get_user(p, &up->clips))
return -EFAULT;
uclips = compat_ptr(p);


@ -368,9 +368,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
host->irq_mask &= ~irq;
else
host->irq_mask |= irq;
spin_unlock_irqrestore(&host->lock, flags);
writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
spin_unlock_irqrestore(&host->lock, flags);
}
static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,


@ -244,7 +244,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
* in any case.
*/
if (mode & FMODE_WRITE) {
ret = -EPERM;
ret = -EROFS;
goto out_unlock;
}


@ -951,6 +951,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
/*
* Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
* MLC NAND is different and needs special care, otherwise UBI or UBIFS
* will die soon and you will lose all your data.
*/
if (mtd->type == MTD_MLCNANDFLASH) {
pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
mtd->index);
return -EINVAL;
}
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)


@ -360,7 +360,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
{
int i;
flush_work(&ubi->fm_work);
return_unused_pool_pebs(ubi, &ubi->fm_pool);
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);


@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
if(x < 0 || x > comp->rslot_limit)
goto bad;
/* Check if the cstate is initialized */
if (!comp->rstate[x].initialized)
goto bad;
comp->flags &=~ SLF_TOSS;
comp->recv_current = x;
} else {
@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
if (cs->cs_tcp.doff > 5)
memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
cs->initialized = true;
/* Put headers back on packet
* Neither header checksum is recalculated
*/
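
A COMPRESSED_TCP packet names a slot that must already have been primed
by an UNCOMPRESSED_TCP packet; before this fix, a crafted first packet
could reference a never-initialized slot and decompress against garbage
state. The guard, as a standalone sketch:

    #include <stdbool.h>
    #include <stddef.h>

    struct cstate {
        bool initialized;       /* set when slhc_remember() primes the slot */
        /* ... saved TCP/IP header state ... */
    };

    /* Return the slot, or NULL if the index is out of range or never primed
     * (the caller then tosses the packet instead of decompressing). */
    static struct cstate *lookup_slot(struct cstate *rstate, int x, int limit)
    {
        if (x < 0 || x > limit)
            return NULL;
        if (!rstate[x].initialized)
            return NULL;
        return &rstate[x];
    }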


@ -704,6 +704,12 @@ static const struct usb_device_id products[] = {
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* Cinterion AHS3 modem by GEMALTO */
USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* Telit modules */
USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,


@ -618,7 +618,8 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
offset += 0x100;
else
ret = -EINVAL;
ret = lan78xx_read_raw_otp(dev, offset, length, data);
if (!ret)
ret = lan78xx_read_raw_otp(dev, offset, length, data);
}
return ret;


@ -1454,6 +1454,7 @@ static int rtl8187_probe(struct usb_interface *intf,
goto err_free_dev;
}
mutex_init(&priv->io_mutex);
mutex_init(&priv->conf_mutex);
SET_IEEE80211_DEV(dev, &intf->dev);
usb_set_intfdata(intf, dev);
@ -1627,7 +1628,6 @@ static int rtl8187_probe(struct usb_interface *intf,
printk(KERN_ERR "rtl8187: Cannot register device\n");
goto err_free_dmabuf;
}
mutex_init(&priv->conf_mutex);
skb_queue_head_init(&priv->b_tx_status.queue);
wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",


@ -2024,7 +2024,10 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
break;
case XenbusStateUnknown:
wake_up_all(&module_unload_q);
break;
case XenbusStateInitWait:
@ -2155,7 +2158,9 @@ static int xennet_remove(struct xenbus_device *dev)
xenbus_switch_state(dev, XenbusStateClosing);
wait_event(module_unload_q,
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosing);
XenbusStateClosing ||
xenbus_read_driver_state(dev->otherend) ==
XenbusStateUnknown);
xenbus_switch_state(dev, XenbusStateClosed);
wait_event(module_unload_q,


@ -587,6 +587,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
{
unsigned long long sta = 0;
struct acpiphp_func *func;
u32 dvid;
list_for_each_entry(func, &slot->funcs, sibling) {
if (func->flags & FUNC_HAS_STA) {
@ -597,19 +598,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
if (ACPI_SUCCESS(status) && sta)
break;
} else {
u32 dvid;
pci_bus_read_config_dword(slot->bus,
PCI_DEVFN(slot->device,
func->function),
PCI_VENDOR_ID, &dvid);
if (dvid != 0xffffffff) {
if (pci_bus_read_dev_vendor_id(slot->bus,
PCI_DEVFN(slot->device, func->function),
&dvid, 0)) {
sta = ACPI_STA_ALL;
break;
}
}
}
if (!sta) {
/*
* Check for the slot itself since it may be that the
* ACPI slot is a device below PCIe upstream port so in
* that case it may not even be reachable yet.
*/
if (pci_bus_read_dev_vendor_id(slot->bus,
PCI_DEVFN(slot->device, 0), &dvid, 0)) {
sta = ACPI_STA_ALL;
}
}
return (unsigned int)sta;
}


@ -126,7 +126,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int start, int count, int auto_ack)
{
int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
int rc, tmp_count = count, tmp_start = start, nr = q->nr;
unsigned int ccq = 0;
qperf_inc(q, eqbs);
@ -149,14 +149,7 @@ again:
qperf_inc(q, eqbs_partial);
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
tmp_count);
/*
* Retry once, if that fails bail out and process the
* extracted buffers before trying again.
*/
if (!retried++)
goto again;
else
return count - tmp_count;
return count - tmp_count;
}
DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
@ -212,7 +205,10 @@ again:
return 0;
}
/* returns number of examined buffers and their common state in *state */
/*
* Returns number of examined buffers and their common state in *state.
* Requested number of buffers-to-examine must be > 0.
*/
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, unsigned int count,
int auto_ack, int merge_pending)
@ -223,17 +219,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
for (i = 0; i < count; i++) {
if (!__state) {
__state = q->slsb.val[bufnr];
if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
__state = SLSB_P_OUTPUT_EMPTY;
} else if (merge_pending) {
if ((q->slsb.val[bufnr] & __state) != __state)
break;
} else if (q->slsb.val[bufnr] != __state)
break;
/* get initial state: */
__state = q->slsb.val[bufnr];
if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
__state = SLSB_P_OUTPUT_EMPTY;
for (i = 1; i < count; i++) {
bufnr = next_buf(bufnr);
/* merge PENDING into EMPTY: */
if (merge_pending &&
q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
__state == SLSB_P_OUTPUT_EMPTY)
continue;
/* stop if next state differs from initial state: */
if (q->slsb.val[bufnr] != __state)
break;
}
*state = __state;
return i;
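
The rewritten loop reads the first buffer's state once, then counts how
many of the following buffers share it, folding OUTPUT_PENDING into
OUTPUT_EMPTY only when merging is requested. The same scan over a plain
ring, as a standalone sketch:

    enum bstate { EMPTY, PENDING, PRIMED };

    /* Count consecutive ring entries (count > 0, wrapping at ring_size) that
     * share the first entry's state; report that state through *state. */
    static int count_same_state(const enum bstate *ring, int ring_size,
                                int start, int count, int merge_pending,
                                enum bstate *state)
    {
        enum bstate first = ring[start];
        int i;

        if (merge_pending && first == PENDING)
            first = EMPTY;      /* treat the initial PENDING as EMPTY */

        for (i = 1; i < count; i++) {
            int idx = (start + i) % ring_size;

            if (merge_pending && ring[idx] == PENDING && first == EMPTY)
                continue;       /* merge PENDING into EMPTY */
            if (ring[idx] != first)
                break;          /* stop at the first differing state */
        }
        *state = first;
        return i;
    }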


@ -589,6 +589,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
data->irq_enabled = true;
data->mode = THERMAL_DEVICE_ENABLED;
ret = devm_request_threaded_irq(&pdev->dev, data->irq,
imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
0, "imx_thermal", data);
@ -600,9 +603,6 @@ static int imx_thermal_probe(struct platform_device *pdev)
return ret;
}
data->irq_enabled = true;
data->mode = THERMAL_DEVICE_ENABLED;
return 0;
}


@ -627,6 +627,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
* we just disable hotplug, the
* pci-tunnels stay alive.
*/
.thaw_noirq = nhi_resume_noirq,
.restore_noirq = nhi_resume_noirq,
};


@ -2238,6 +2238,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
}
if (tty_hung_up_p(file))
break;
/*
* Abort readers for ttys which never actually
* get hung up. See __tty_hangup().
*/
if (test_bit(TTY_HUPPING, &tty->flags))
break;
if (!timeout)
break;
if (file->f_flags & O_NONBLOCK) {


@ -702,6 +702,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
return;
}
/*
* Some console devices aren't actually hung up for technical and
* historical reasons, which can lead to indefinite interruptible
* sleep in n_tty_read(). The following explicitly tells
* n_tty_read() to abort readers.
*/
set_bit(TTY_HUPPING, &tty->flags);
/* inuse_filps is protected by the single tty lock,
this really needs to change if we want to flush the
workqueue with the lock held */
@ -757,6 +765,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
* can't yet guarantee all that.
*/
set_bit(TTY_HUPPED, &tty->flags);
clear_bit(TTY_HUPPING, &tty->flags);
tty_unlock(tty);
if (f)
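
TTY_HUPPED is only set once the hangup has finished, and some console
devices are never truly hung up, so a reader sleeping in n_tty_read()
could wait forever. The new TTY_HUPPING bit brackets the hangup in
progress so woken readers bail out. The handshake, as a much-simplified
standalone sketch with atomics standing in for the tty flag bits:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool hupping;         /* stands in for TTY_HUPPING */
    static atomic_bool hupped;          /* stands in for TTY_HUPPED  */

    static bool reader_should_abort(void)
    {
        /* checked in the read loop alongside the existing exit conditions */
        return atomic_load(&hupping) || atomic_load(&hupped);
    }

    static void hangup(void)
    {
        atomic_store(&hupping, true);   /* readers woken below will abort */
        /* ... flush buffers, wake waiters, tear down the session ... */
        atomic_store(&hupped, true);
        atomic_store(&hupping, false);
    }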


@ -242,8 +242,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
if (!udev->parent)
rc = hcd_bus_suspend(udev, msg);
/* Non-root devices don't need to do anything for FREEZE or PRETHAW */
else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
/*
* Non-root USB2 devices don't need to do anything for FREEZE
* or PRETHAW. USB3 devices don't support global suspend and
* need to be selectively suspended.
*/
else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
&& (udev->speed < USB_SPEED_SUPER))
rc = 0;
else
rc = usb_port_suspend(udev, msg);


@ -167,7 +167,7 @@ static int dwc3_pci_probe(struct pci_dev *pci,
ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
if (ret) {
dev_err(dev, "couldn't add resources to dwc3 device\n");
return ret;
goto err;
}
pci_set_drvdata(pci, dwc3);


@ -114,15 +114,19 @@ static int service_tx_status_request(
}
is_in = epnum & USB_DIR_IN;
if (is_in) {
epnum &= 0x0f;
ep = &musb->endpoints[epnum].ep_in;
} else {
ep = &musb->endpoints[epnum].ep_out;
epnum &= 0x0f;
if (epnum >= MUSB_C_NUM_EPS) {
handled = -EINVAL;
break;
}
if (is_in)
ep = &musb->endpoints[epnum].ep_in;
else
ep = &musb->endpoints[epnum].ep_out;
regs = musb->endpoints[epnum].regs;
if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
if (!ep->desc) {
handled = -EINVAL;
break;
}


@ -752,6 +752,62 @@ static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
return 0;
}
static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 val)
{
__le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
offset + PCI_EXP_DEVCTL);
int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
if (count < 0)
return count;
/*
* The FLR bit is virtualized, if set and the device supports PCIe
* FLR, issue a reset_function. Regardless, clear the bit, the spec
* requires it to be always read as zero. NB, reset_function might
* not use a PCIe FLR, we don't have that level of granularity.
*/
if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
u32 cap;
int ret;
*ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
ret = pci_user_read_config_dword(vdev->pdev,
pos - offset + PCI_EXP_DEVCAP,
&cap);
if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
pci_try_reset_function(vdev->pdev);
}
/*
* MPS is virtualized to the user, writes do not change the physical
* register since determining a proper MPS value requires a system wide
* device view. The MRRS is largely independent of MPS, but since the
* user does not have that system-wide view, they might set a safe, but
* inefficiently low value. Here we allow writes through to hardware,
* but we set the floor to the physical device MPS setting, so that
* we can at least use full TLPs, as defined by the MPS value.
*
* NB, if any devices actually depend on an artificially low MRRS
* setting, this will need to be revisited, perhaps with a quirk
* though pcie_set_readrq().
*/
if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
readrq = 128 <<
((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
readrq = max(readrq, pcie_get_mps(vdev->pdev));
pcie_set_readrq(vdev->pdev, readrq);
}
return count;
}
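
The MRRS handling above decodes bits 14:12 of the Device Control register
(an encoded value n means 128 << n bytes) and floors the user's choice at
the device's MPS before writing it to hardware. The decode and clamp, as
a standalone sketch:

    /* PCI_EXP_DEVCTL_READRQ is bits 14:12 of Device Control; encoded value
     * n means a Max_Read_Request_Size of 128 << n bytes (128..4096). */
    static int clamped_readrq(unsigned short devctl, int mps_bytes)
    {
        int readrq = 128 << ((devctl & 0x7000) >> 12);

        return readrq < mps_bytes ? mps_bytes : readrq;     /* floor at MPS */
    }
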
/* Permissions for PCI Express capability */
static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
{
@ -759,26 +815,67 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
return -ENOMEM;
perm->writefn = vfio_exp_config_write;
p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
/*
* Allow writes to device control fields (includes FLR!)
* but not to devctl_phantom which could confuse IOMMU
* or to the ARI bit in devctl2 which is set at probe time
* Allow writes to device control fields, except devctl_phantom,
* which could confuse IOMMU, MPS, which can break communication
* with other physical devices, and the ARI bit in devctl2, which
* is set at probe time. FLR and MRRS get virtualized via our
* writefn.
*/
p_setw(perm, PCI_EXP_DEVCTL, NO_VIRT, ~PCI_EXP_DEVCTL_PHANTOM);
p_setw(perm, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
return 0;
}
static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 val)
{
u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
if (count < 0)
return count;
/*
* The FLR bit is virtualized, if set and the device supports AF
* FLR, issue a reset_function. Regardless, clear the bit, the spec
* requires it to be always read as zero. NB, reset_function might
* not use an AF FLR, we don't have that level of granularity.
*/
if (*ctrl & PCI_AF_CTRL_FLR) {
u8 cap;
int ret;
*ctrl &= ~PCI_AF_CTRL_FLR;
ret = pci_user_read_config_byte(vdev->pdev,
pos - offset + PCI_AF_CAP,
&cap);
if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
pci_try_reset_function(vdev->pdev);
}
return count;
}
/* Permissions for Advanced Function capability */
static int __init init_pci_cap_af_perm(struct perm_bits *perm)
{
if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
return -ENOMEM;
perm->writefn = vfio_af_config_write;
p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
p_setb(perm, PCI_AF_CTRL, NO_VIRT, PCI_AF_CTRL_FLR);
p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
return 0;
}


@ -450,7 +450,7 @@ static bool watchdog_is_running(void)
is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
&& (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
& F71808FG_FLAG_WD_EN);
& BIT(F71808FG_FLAG_WD_EN));
superio_exit(watchdog.sioaddr);
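
F71808FG_FLAG_WD_EN is a bit number, not a mask, so masking the register
with the flag itself tested unrelated low bits and could misreport
whether the watchdog was running. The corrected test, as a standalone
sketch:

    #define BIT(n)  (1u << (n))

    /* wd_en_bit is the bit *number* of the WD_EN flag in the watchdog
     * configuration register. */
    static int wd_enabled(unsigned int wdt_conf, unsigned int wd_en_bit)
    {
        return (wdt_conf & BIT(wd_en_bit)) != 0;
    }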


@ -746,7 +746,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, umode_t m
autofs4_del_active(dentry);
inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
if (!inode)
return -ENOMEM;
d_add(dentry, inode);


@ -25,15 +25,8 @@
#include <linux/namei.h>
#include "fscrypt_private.h"
/*
* Call fscrypt_decrypt_page on every single page, reusing the encryption
* context.
*/
static void completion_pages(struct work_struct *work)
static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
{
struct fscrypt_ctx *ctx =
container_of(work, struct fscrypt_ctx, r.work);
struct bio *bio = ctx->r.bio;
struct bio_vec *bv;
int i;
@ -45,22 +38,38 @@ static void completion_pages(struct work_struct *work)
if (ret) {
WARN_ON_ONCE(1);
SetPageError(page);
} else {
} else if (done) {
SetPageUptodate(page);
}
unlock_page(page);
if (done)
unlock_page(page);
}
}
void fscrypt_decrypt_bio(struct bio *bio)
{
__fscrypt_decrypt_bio(bio, false);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
static void completion_pages(struct work_struct *work)
{
struct fscrypt_ctx *ctx =
container_of(work, struct fscrypt_ctx, r.work);
struct bio *bio = ctx->r.bio;
__fscrypt_decrypt_bio(bio, true);
fscrypt_release_ctx(ctx);
bio_put(bio);
}
void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
{
INIT_WORK(&ctx->r.work, completion_pages);
ctx->r.bio = bio;
queue_work(fscrypt_read_workqueue, &ctx->r.work);
fscrypt_enqueue_decrypt_work(&ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{


@ -45,12 +45,18 @@ static mempool_t *fscrypt_bounce_page_pool = NULL;
static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);
struct workqueue_struct *fscrypt_read_workqueue;
static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);
static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
/**
* fscrypt_release_ctx() - Releases an encryption context
* @ctx: The encryption context to release.


@ -107,7 +107,6 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
/* crypto.c */
extern struct kmem_cache *fscrypt_info_cachep;
extern int fscrypt_initialize(unsigned int cop_flags);
extern struct workqueue_struct *fscrypt_read_workqueue;
extern int fscrypt_do_page_crypto(const struct inode *inode,
fscrypt_direction_t rw, u64 lblk_num,
struct page *src_page,


@ -242,8 +242,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
*/
ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
sb->s_blocksize * 8, bh->b_data);
ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
ext4_group_desc_csum_set(sb, block_group, gdp);
return 0;
}
@ -447,6 +445,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
err = ext4_init_block_bitmap(sb, bh, block_group, desc);
set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
if (err) {


@ -63,44 +63,6 @@ void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
/* Initializes an uninitialized inode bitmap */
static int ext4_init_inode_bitmap(struct super_block *sb,
struct buffer_head *bh,
ext4_group_t block_group,
struct ext4_group_desc *gdp)
{
struct ext4_group_info *grp;
struct ext4_sb_info *sbi = EXT4_SB(sb);
J_ASSERT_BH(bh, buffer_locked(bh));
/* If checksum is bad mark all blocks and inodes use to prevent
* allocation, essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
grp = ext4_get_group_info(sb, block_group);
if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
percpu_counter_sub(&sbi->s_freeclusters_counter,
grp->bb_free);
set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
int count;
count = ext4_free_inodes_count(sb, gdp);
percpu_counter_sub(&sbi->s_freeinodes_counter,
count);
}
set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
return -EFSBADCRC;
}
memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
bh->b_data);
ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
EXT4_INODES_PER_GROUP(sb) / 8);
ext4_group_desc_csum_set(sb, block_group, gdp);
return 0;
}
void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
if (uptodate) {
@ -184,17 +146,14 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
ext4_lock_group(sb, block_group);
if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
err = ext4_init_inode_bitmap(sb, bh, block_group, desc);
memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
sb->s_blocksize * 8, bh->b_data);
set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
if (err) {
ext4_error(sb, "Failed to init inode bitmap for group "
"%u: %d", block_group, err);
goto out;
}
return bh;
}
ext4_unlock_group(sb, block_group);


@ -377,7 +377,7 @@ out:
static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
unsigned int len)
{
int ret, size;
int ret, size, no_expand;
struct ext4_inode_info *ei = EXT4_I(inode);
if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
@ -387,15 +387,14 @@ static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
if (size < len)
return -ENOSPC;
down_write(&EXT4_I(inode)->xattr_sem);
ext4_write_lock_xattr(inode, &no_expand);
if (ei->i_inline_off)
ret = ext4_update_inline_data(handle, inode, len);
else
ret = ext4_create_inline_data(handle, inode, len);
up_write(&EXT4_I(inode)->xattr_sem);
ext4_write_unlock_xattr(inode, &no_expand);
return ret;
}
@ -537,7 +536,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
struct inode *inode,
unsigned flags)
{
int ret, needed_blocks;
int ret, needed_blocks, no_expand;
handle_t *handle = NULL;
int retries = 0, sem_held = 0;
struct page *page = NULL;
@ -577,7 +576,7 @@ retry:
goto out;
}
down_write(&EXT4_I(inode)->xattr_sem);
ext4_write_lock_xattr(inode, &no_expand);
sem_held = 1;
/* If some one has already done this for us, just exit. */
if (!ext4_has_inline_data(inode)) {
@ -613,7 +612,7 @@ retry:
page_cache_release(page);
page = NULL;
ext4_orphan_add(handle, inode);
up_write(&EXT4_I(inode)->xattr_sem);
ext4_write_unlock_xattr(inode, &no_expand);
sem_held = 0;
ext4_journal_stop(handle);
handle = NULL;
@ -639,7 +638,7 @@ out:
page_cache_release(page);
}
if (sem_held)
up_write(&EXT4_I(inode)->xattr_sem);
ext4_write_unlock_xattr(inode, &no_expand);
if (handle)
ext4_journal_stop(handle);
brelse(iloc.bh);
@ -732,7 +731,7 @@ convert:
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
unsigned copied, struct page *page)
{
int ret;
int ret, no_expand;
void *kaddr;
struct ext4_iloc iloc;
@ -750,7 +749,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
goto out;
}
down_write(&EXT4_I(inode)->xattr_sem);
ext4_write_lock_xattr(inode, &no_expand);
BUG_ON(!ext4_has_inline_data(inode));
kaddr = kmap_atomic(page);
@ -760,7 +759,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
/* clear page dirty so that writepages wouldn't work for us. */
ClearPageDirty(page);
up_write(&EXT4_I(inode)->xattr_sem);
ext4_write_unlock_xattr(inode, &no_expand);
brelse(iloc.bh);
out:
return copied;
@ -771,7 +770,7 @@ ext4_journalled_write_inline_data(struct inode *inode,
unsigned len,
struct page *page)
{
int ret;
int ret, no_expand;
void *kaddr;
struct ext4_iloc iloc;
@ -781,11 +780,11 @@ ext4_journalled_write_inline_data(struct inode *inode,
return NULL;
}
down_write(&EXT4_I(inode)->xattr_sem);
ext4_write_lock_xattr(inode, &no_expand);
kaddr = kmap_atomic(page);
ext4_write_inline_data(inode, &iloc, kaddr, 0, len);
kunmap_atomic(kaddr);
up_write(&EXT4_I(inode)->xattr_sem);
ext4_write_unlock_xattr(inode, &no_expand);
return iloc.bh;
}
@ -1268,7 +1267,7 @@ out:
int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
struct inode *dir, struct inode *inode)
{
int ret, inline_size;
int ret, inline_size, no_expand;
void *inline_start;
struct ext4_iloc iloc;
@ -1276,7 +1275,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
if (ret)
return ret;
down_write(&EXT4_I(dir)->xattr_sem);
ext4_write_lock_xattr(dir, &no_expand);
if (!ext4_has_inline_data(dir))
goto out;
@ -1322,7 +1321,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
out:
ext4_mark_inode_dirty(handle, dir);
up_write(&EXT4_I(dir)->xattr_sem);
ext4_write_unlock_xattr(dir, &no_expand);
brelse(iloc.bh);
return ret;
}
@ -1682,7 +1681,7 @@ int ext4_delete_inline_entry(handle_t *handle,
struct buffer_head *bh,
int *has_inline_data)
{
int err, inline_size;
int err, inline_size, no_expand;
struct ext4_iloc iloc;
void *inline_start;
@ -1690,7 +1689,7 @@ int ext4_delete_inline_entry(handle_t *handle,
if (err)
return err;
down_write(&EXT4_I(dir)->xattr_sem);
ext4_write_lock_xattr(dir, &no_expand);
if (!ext4_has_inline_data(dir)) {
*has_inline_data = 0;
goto out;
@ -1725,7 +1724,7 @@ int ext4_delete_inline_entry(handle_t *handle,
ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size);
out:
up_write(&EXT4_I(dir)->xattr_sem);
ext4_write_unlock_xattr(dir, &no_expand);
brelse(iloc.bh);
if (err != -ENOENT)
ext4_std_error(dir->i_sb, err);
@ -1824,11 +1823,11 @@ out:
int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
{
int ret;
int ret, no_expand;
down_write(&EXT4_I(inode)->xattr_sem);
ext4_write_lock_xattr(inode, &no_expand);
ret = ext4_destroy_inline_data_nolock(handle, inode);
up_write(&EXT4_I(inode)->xattr_sem);
ext4_write_unlock_xattr(inode, &no_expand);
return ret;
}
@ -1913,7 +1912,7 @@ out:
void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
{
handle_t *handle;
int inline_size, value_len, needed_blocks;
int inline_size, value_len, needed_blocks, no_expand;
size_t i_size;
void *value = NULL;
struct ext4_xattr_ibody_find is = {
@ -1930,7 +1929,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
if (IS_ERR(handle))
return;
down_write(&EXT4_I(inode)->xattr_sem);
ext4_write_lock_xattr(inode, &no_expand);
if (!ext4_has_inline_data(inode)) {
*has_inline = 0;
ext4_journal_stop(handle);
@ -1988,7 +1987,7 @@ out_error:
up_write(&EXT4_I(inode)->i_data_sem);
out:
brelse(is.iloc.bh);
up_write(&EXT4_I(inode)->xattr_sem);
ext4_write_unlock_xattr(inode, &no_expand);
kfree(value);
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
@ -2004,7 +2003,7 @@ out:
int ext4_convert_inline_data(struct inode *inode)
{
int error, needed_blocks;
int error, needed_blocks, no_expand;
handle_t *handle;
struct ext4_iloc iloc;
@ -2026,15 +2025,10 @@ int ext4_convert_inline_data(struct inode *inode)
goto out_free;
}
down_write(&EXT4_I(inode)->xattr_sem);
if (!ext4_has_inline_data(inode)) {
up_write(&EXT4_I(inode)->xattr_sem);
goto out;
}
error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
up_write(&EXT4_I(inode)->xattr_sem);
out:
ext4_write_lock_xattr(inode, &no_expand);
if (ext4_has_inline_data(inode))
error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
ext4_write_unlock_xattr(inode, &no_expand);
ext4_journal_stop(handle);
out_free:
brelse(iloc.bh);


@ -1545,6 +1545,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
if (invalidate) {
if (page_mapped(page))
clear_page_dirty_for_io(page);
block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
ClearPageUptodate(page);
}
@ -3297,29 +3299,29 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
* case, we allocate an io_end structure to hook to the iocb.
*/
iocb->private = NULL;
ext4_inode_aio_set(inode, NULL);
if (!is_sync_kiocb(iocb)) {
io_end = ext4_init_io_end(inode, GFP_NOFS);
if (!io_end) {
ret = -ENOMEM;
goto retake_lock;
}
/*
* Grab reference for DIO. Will be dropped in ext4_end_io_dio()
*/
iocb->private = ext4_get_io_end(io_end);
/*
* we save the io structure for current async direct
* IO, so that later ext4_map_blocks() could flag the
* io structure whether there is a unwritten extents
* needs to be converted when IO is completed.
*/
ext4_inode_aio_set(inode, io_end);
}
if (overwrite) {
get_block_func = ext4_get_block_write_nolock;
} else {
ext4_inode_aio_set(inode, NULL);
if (!is_sync_kiocb(iocb)) {
io_end = ext4_init_io_end(inode, GFP_NOFS);
if (!io_end) {
ret = -ENOMEM;
goto retake_lock;
}
/*
* Grab reference for DIO. Will be dropped in
* ext4_end_io_dio()
*/
iocb->private = ext4_get_io_end(io_end);
/*
* we save the io structure for current async direct
* IO, so that later ext4_map_blocks() could flag the
* io structure whether there is a unwritten extents
* needs to be converted when IO is completed.
*/
ext4_inode_aio_set(inode, io_end);
}
get_block_func = ext4_get_block_write;
dio_flags = DIO_LOCKING;
}
@ -4317,6 +4319,12 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
goto bad_inode;
raw_inode = ext4_raw_inode(&iloc);
if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
EXT4_ERROR_INODE(inode, "root inode unallocated");
ret = -EFSCORRUPTED;
goto bad_inode;
}
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >


@ -2130,6 +2130,8 @@ static int ext4_check_descriptors(struct super_block *sb,
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Block bitmap for group %u overlaps "
"superblock", i);
if (!(sb->s_flags & MS_RDONLY))
return 0;
}
if (block_bitmap < first_block || block_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@ -2142,6 +2144,8 @@ static int ext4_check_descriptors(struct super_block *sb,
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode bitmap for group %u overlaps "
"superblock", i);
if (!(sb->s_flags & MS_RDONLY))
return 0;
}
if (inode_bitmap < first_block || inode_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2154,6 +2158,8 @@ static int ext4_check_descriptors(struct super_block *sb,
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode table for group %u overlaps "
"superblock", i);
if (!(sb->s_flags & MS_RDONLY))
return 0;
}
if (inode_table < first_block ||
inode_table + sbi->s_itb_per_group - 1 > last_block) {


@@ -1143,16 +1143,14 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
struct ext4_xattr_block_find bs = {
.s = { .not_found = -ENODATA, },
};
unsigned long no_expand;
int no_expand;
int error;
if (!name)
return -EINVAL;
if (strlen(name) > 255)
return -ERANGE;
down_write(&EXT4_I(inode)->xattr_sem);
no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
ext4_write_lock_xattr(inode, &no_expand);
error = ext4_reserve_inode_write(handle, inode, &is.iloc);
if (error)
@@ -1213,7 +1211,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
ext4_xattr_update_super_block(handle, inode->i_sb);
inode->i_ctime = ext4_current_time(inode);
if (!value)
ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
no_expand = 0;
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
/*
* The bh is consumed by ext4_mark_iloc_dirty, even with
@@ -1227,9 +1225,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
cleanup:
brelse(is.iloc.bh);
brelse(bs.bh);
if (no_expand == 0)
ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
up_write(&EXT4_I(inode)->xattr_sem);
ext4_write_unlock_xattr(inode, &no_expand);
return error;
}
@@ -1313,12 +1309,11 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
int error = 0, tried_min_extra_isize = 0;
int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
int isize_diff; /* How much do we need to grow i_extra_isize */
int no_expand;
if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
return 0;
down_write(&EXT4_I(inode)->xattr_sem);
/*
* Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
*/
ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
retry:
isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
@@ -1512,8 +1507,7 @@ retry:
}
brelse(bh);
out:
ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
up_write(&EXT4_I(inode)->xattr_sem);
ext4_write_unlock_xattr(inode, &no_expand);
return 0;
cleanup:
@@ -1525,10 +1519,10 @@ cleanup:
kfree(bs);
brelse(bh);
/*
* We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
* size expansion failed.
* Inode size expansion failed; don't try again
*/
up_write(&EXT4_I(inode)->xattr_sem);
no_expand = 1;
ext4_write_unlock_xattr(inode, &no_expand);
return error;
}


@@ -101,6 +101,38 @@ extern const struct xattr_handler ext4_xattr_security_handler;
#define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c"
/*
* The EXT4_STATE_NO_EXPAND is overloaded and used for two purposes.
* The first is to signal that the inline xattrs and data are
* taking up so much space that we might as well not keep trying to
* expand it. The second is that xattr_sem is taken for writing, so
* we shouldn't try to recurse into the inode expansion. For this
* second case, we need to make sure that we save and restore the
* NO_EXPAND state flag appropriately.
*/
static inline void ext4_write_lock_xattr(struct inode *inode, int *save)
{
down_write(&EXT4_I(inode)->xattr_sem);
*save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
}
static inline int ext4_write_trylock_xattr(struct inode *inode, int *save)
{
if (down_write_trylock(&EXT4_I(inode)->xattr_sem) == 0)
return 0;
*save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
return 1;
}
static inline void ext4_write_unlock_xattr(struct inode *inode, int *save)
{
if (*save == 0)
ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
up_write(&EXT4_I(inode)->xattr_sem);
}
extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
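
For clarity, a minimal sketch of how the new helpers are meant to be paired; the caller below is hypothetical and not part of this diff:

    /* Hypothetical caller, for illustration only. */
    static int example_update_xattrs(struct inode *inode)
    {
            int no_expand;

            ext4_write_lock_xattr(inode, &no_expand);
            /* ... modify in-inode xattr space; NO_EXPAND is set here,
             * so nothing below recurses into inode expansion ... */
            ext4_write_unlock_xattr(inode, &no_expand);
            return 0;
    }

The save/restore through *save is what lets a holder leave a pre-existing NO_EXPAND flag intact on unlock: the flag is cleared only if it was not already set when the lock was taken.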


@@ -386,7 +386,7 @@ static int f2fs_set_meta_page_dirty(struct page *page)
if (!PageUptodate(page))
SetPageUptodate(page);
if (!PageDirty(page)) {
f2fs_set_page_dirty_nobuffers(page);
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
SetPagePrivate(page);
f2fs_trace_pid(page);


@@ -19,8 +19,6 @@
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "f2fs.h"
@@ -30,6 +28,11 @@
#include <trace/events/f2fs.h>
#include <trace/events/android_fs.h>
#define NUM_PREALLOC_POST_READ_CTXS 128
static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;
static bool __is_cp_guaranteed(struct page *page)
{
struct address_space *mapping = page->mapping;
@@ -50,11 +53,77 @@ static bool __is_cp_guaranteed(struct page *page)
return false;
}
static void f2fs_read_end_io(struct bio *bio)
/* postprocessing steps for read bios */
enum bio_post_read_step {
STEP_INITIAL = 0,
STEP_DECRYPT,
};
struct bio_post_read_ctx {
struct bio *bio;
struct work_struct work;
unsigned int cur_step;
unsigned int enabled_steps;
};
static void __read_end_io(struct bio *bio)
{
struct bio_vec *bvec;
struct page *page;
struct bio_vec *bv;
int i;
bio_for_each_segment_all(bv, bio, i) {
page = bv->bv_page;
/* PG_error was set if any post_read step failed */
if (bio->bi_error || PageError(page)) {
ClearPageUptodate(page);
SetPageError(page);
} else {
SetPageUptodate(page);
}
unlock_page(page);
}
if (bio->bi_private)
mempool_free(bio->bi_private, bio_post_read_ctx_pool);
bio_put(bio);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
static void decrypt_work(struct work_struct *work)
{
struct bio_post_read_ctx *ctx =
container_of(work, struct bio_post_read_ctx, work);
fscrypt_decrypt_bio(ctx->bio);
bio_post_read_processing(ctx);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
switch (++ctx->cur_step) {
case STEP_DECRYPT:
if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
INIT_WORK(&ctx->work, decrypt_work);
fscrypt_enqueue_decrypt_work(&ctx->work);
return;
}
ctx->cur_step++;
/* fall-through */
default:
__read_end_io(ctx->bio);
}
}
static bool f2fs_bio_post_read_required(struct bio *bio)
{
return bio->bi_private && !bio->bi_error;
}
static void f2fs_read_end_io(struct bio *bio)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
f2fs_show_injection_info(FAULT_IO);
@@ -62,28 +131,15 @@ static void f2fs_read_end_io(struct bio *bio)
}
#endif
if (f2fs_bio_encrypted(bio)) {
if (bio->bi_error) {
fscrypt_release_ctx(bio->bi_private);
} else {
fscrypt_decrypt_bio_pages(bio->bi_private, bio);
return;
}
if (f2fs_bio_post_read_required(bio)) {
struct bio_post_read_ctx *ctx = bio->bi_private;
ctx->cur_step = STEP_INITIAL;
bio_post_read_processing(ctx);
return;
}
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
if (!bio->bi_error) {
if (!PageUptodate(page))
SetPageUptodate(page);
} else {
ClearPageUptodate(page);
SetPageError(page);
}
unlock_page(page);
}
bio_put(bio);
__read_end_io(bio);
}
static void f2fs_write_end_io(struct bio *bio)
@@ -480,29 +536,33 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
unsigned nr_pages)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct fscrypt_ctx *ctx = NULL;
struct bio *bio;
struct bio_post_read_ctx *ctx;
unsigned int post_read_steps = 0;
if (f2fs_encrypted_file(inode)) {
ctx = fscrypt_get_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
if (!bio)
return ERR_PTR(-ENOMEM);
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
if (f2fs_encrypted_file(inode))
post_read_steps |= 1 << STEP_DECRYPT;
if (post_read_steps) {
ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
if (!ctx) {
bio_put(bio);
return ERR_PTR(-ENOMEM);
}
ctx->bio = bio;
ctx->enabled_steps = post_read_steps;
bio->bi_private = ctx;
/* wait for the page to be moved by cleaning */
f2fs_wait_on_block_writeback(sbi, blkaddr);
}
bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
if (!bio) {
if (ctx)
fscrypt_release_ctx(ctx);
return ERR_PTR(-ENOMEM);
}
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
bio->bi_private = ctx;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
return bio;
}
@@ -1523,7 +1583,7 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
if (!f2fs_encrypted_file(inode))
return 0;
/* wait for GCed encrypted page writeback */
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
retry_encrypt:
@@ -1673,6 +1733,7 @@ got_it:
goto out_writepage;
set_page_writeback(page);
ClearPageError(page);
f2fs_put_dnode(&dn);
if (fio->need_lock == LOCK_REQ)
f2fs_unlock_op(fio->sbi);
@@ -1695,6 +1756,7 @@ got_it:
goto out_writepage;
set_page_writeback(page);
ClearPageError(page);
/* LFS mode write path */
write_data_page(&dn, fio);
@@ -2235,8 +2297,8 @@ repeat:
f2fs_wait_on_page_writeback(page, DATA, false);
/* wait for GCed encrypted page writeback */
if (f2fs_encrypted_file(inode))
/* wait for GCed page writeback via META_MAPPING */
if (f2fs_post_read_required(inode))
f2fs_wait_on_block_writeback(sbi, blkaddr);
if (len == PAGE_SIZE || PageUptodate(page))
@@ -2449,37 +2511,6 @@ int f2fs_release_page(struct page *page, gfp_t wait)
return 1;
}
/*
* This was copied from __set_page_dirty_buffers which gives higher performance
* in very high speed storages. (e.g., pmem)
*/
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
struct address_space *mapping = page->mapping;
struct mem_cgroup *memcg;
unsigned long flags;
if (unlikely(!mapping))
return;
spin_lock(&mapping->private_lock);
memcg = mem_cgroup_begin_page_stat(page);
SetPageDirty(page);
spin_unlock(&mapping->private_lock);
spin_lock_irqsave(&mapping->tree_lock, flags);
WARN_ON_ONCE(!PageUptodate(page));
account_page_dirtied(page, mapping, memcg);
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
mem_cgroup_end_page_stat(memcg);
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
return;
}
static int f2fs_set_data_page_dirty(struct page *page)
{
struct address_space *mapping = page->mapping;
@@ -2503,7 +2534,7 @@ static int f2fs_set_data_page_dirty(struct page *page)
}
if (!PageDirty(page)) {
f2fs_set_page_dirty_nobuffers(page);
__set_page_dirty_nobuffers(page);
update_dirty_page(inode, page);
return 1;
}
@@ -2596,3 +2627,27 @@ const struct address_space_operations f2fs_dblock_aops = {
.migratepage = f2fs_migrate_page,
#endif
};
int __init f2fs_init_post_read_processing(void)
{
bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
if (!bio_post_read_ctx_cache)
goto fail;
bio_post_read_ctx_pool =
mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
bio_post_read_ctx_cache);
if (!bio_post_read_ctx_pool)
goto fail_free_cache;
return 0;
fail_free_cache:
kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
return -ENOMEM;
}
void __exit f2fs_destroy_post_read_processing(void)
{
mempool_destroy(bio_post_read_ctx_pool);
kmem_cache_destroy(bio_post_read_ctx_cache);
}
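
The cur_step/enabled_steps pair above is a small per-bio state machine, and the bitmask is what lets further postprocessing stages slot in later. A hedged sketch of how a second step could be added; STEP_VERITY and verity_work() are assumed names, not part of this diff:

    enum bio_post_read_step {
            STEP_INITIAL = 0,
            STEP_DECRYPT,
            STEP_VERITY,            /* assumed future step */
    };

    static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
    {
            switch (++ctx->cur_step) {
            case STEP_DECRYPT:
                    if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
                            INIT_WORK(&ctx->work, decrypt_work);
                            fscrypt_enqueue_decrypt_work(&ctx->work);
                            return;
                    }
                    ctx->cur_step++;
                    /* fall-through */
            case STEP_VERITY:
                    if (ctx->enabled_steps & (1 << STEP_VERITY)) {
                            INIT_WORK(&ctx->work, verity_work); /* assumed */
                            schedule_work(&ctx->work);
                            return;
                    }
                    ctx->cur_step++;
                    /* fall-through */
            default:
                    __read_end_io(ctx->bio);
            }
    }

Each completed step re-enters bio_post_read_processing(), so enabled steps run strictly in enum order and the bio completes exactly once, after the last enabled step finishes.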


@@ -1673,7 +1673,7 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs)
}
static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
struct inode *inode)
struct inode *inode, bool cap)
{
if (!inode)
return true;
@@ -1686,7 +1686,7 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
in_group_p(F2FS_OPTION(sbi).s_resgid))
return true;
if (capable(CAP_SYS_RESOURCE))
if (cap && capable(CAP_SYS_RESOURCE))
return true;
return false;
}
@@ -1721,7 +1721,7 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
avail_user_block_count = sbi->user_block_count -
sbi->current_reserved_blocks;
if (!__allow_reserved_blocks(sbi, inode))
if (!__allow_reserved_blocks(sbi, inode, true))
avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
@@ -1928,7 +1928,7 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
valid_block_count = sbi->total_valid_block_count +
sbi->current_reserved_blocks + 1;
if (!__allow_reserved_blocks(sbi, inode))
if (!__allow_reserved_blocks(sbi, inode, false))
valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
if (unlikely(valid_block_count > sbi->user_block_count)) {
@@ -2937,6 +2937,8 @@ void destroy_checkpoint_caches(void);
/*
* data.c
*/
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
struct inode *inode, nid_t ino, pgoff_t idx,
@@ -2968,7 +2970,6 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
bool should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
void f2fs_set_page_dirty_nobuffers(struct page *page);
int __f2fs_write_data_pages(struct address_space *mapping,
struct writeback_control *wbc,
enum iostat_type io_type);
@@ -3297,9 +3298,13 @@ static inline void f2fs_set_encrypted_inode(struct inode *inode)
#endif
}
static inline bool f2fs_bio_encrypted(struct bio *bio)
/*
* Returns true if the reads of the inode's data need to undergo some
* postprocessing step, like decryption or authenticity verification.
*/
static inline bool f2fs_post_read_required(struct inode *inode)
{
return bio->bi_private != NULL;
return f2fs_encrypted_file(inode);
}
#define F2FS_FEATURE_FUNCS(name, flagname) \
@@ -3367,7 +3372,7 @@ static inline bool f2fs_may_encrypt(struct inode *inode)
static inline bool f2fs_force_buffered_io(struct inode *inode, int rw)
{
return (f2fs_encrypted_file(inode) ||
return (f2fs_post_read_required(inode) ||
(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
F2FS_I_SB(inode)->s_ndevs);
}


@@ -113,8 +113,8 @@ mapped:
/* fill the page */
f2fs_wait_on_page_writeback(page, DATA, false);
/* wait for GCed encrypted page writeback */
if (f2fs_encrypted_file(inode))
/* wait for GCed page writeback via META_MAPPING */
if (f2fs_post_read_required(inode))
f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);
out_sem:


@@ -850,8 +850,8 @@ next_step:
if (IS_ERR(inode) || is_bad_inode(inode))
continue;
/* if encrypted inode, let's go phase 3 */
if (f2fs_encrypted_file(inode)) {
/* if inode uses special I/O path, let's go phase 3 */
if (f2fs_post_read_required(inode)) {
add_gc_inode(gc_list, inode);
continue;
}
@@ -899,7 +899,7 @@
start_bidx = start_bidx_of_node(nofs, inode)
+ ofs_in_node;
if (f2fs_encrypted_file(inode))
if (f2fs_post_read_required(inode))
move_data_block(inode, start_bidx, segno, off);
else
move_data_page(inode, start_bidx, gc_type,


@@ -26,7 +26,7 @@ bool f2fs_may_inline_data(struct inode *inode)
if (i_size_read(inode) > MAX_INLINE_DATA(inode))
return false;
if (f2fs_encrypted_file(inode))
if (f2fs_post_read_required(inode))
return false;
return true;


@@ -294,8 +294,8 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
alloc_nid_done(sbi, ino);
d_instantiate(dentry, inode);
unlock_new_inode(inode);
d_instantiate(dentry, inode);
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
@@ -594,8 +594,8 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
err = page_symlink(inode, disk_link.name, disk_link.len);
err_out:
d_instantiate(dentry, inode);
unlock_new_inode(inode);
d_instantiate(dentry, inode);
/*
* Let's flush symlink data in order to avoid broken symlink as much as
@@ -658,8 +658,8 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
alloc_nid_done(sbi, inode->i_ino);
d_instantiate(dentry, inode);
unlock_new_inode(inode);
d_instantiate(dentry, inode);
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
@@ -710,8 +710,8 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
alloc_nid_done(sbi, inode->i_ino);
d_instantiate(dentry, inode);
unlock_new_inode(inode);
d_instantiate(dentry, inode);
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);


@@ -1775,7 +1775,7 @@ static int f2fs_set_node_page_dirty(struct page *page)
if (!PageUptodate(page))
SetPageUptodate(page);
if (!PageDirty(page)) {
f2fs_set_page_dirty_nobuffers(page);
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
SetPagePrivate(page);
f2fs_trace_pid(page);


@@ -3100,8 +3100,13 @@ static int __init init_f2fs_fs(void)
err = f2fs_create_root_stats();
if (err)
goto free_filesystem;
err = f2fs_init_post_read_processing();
if (err)
goto free_root_stats;
return 0;
free_root_stats:
f2fs_destroy_root_stats();
free_filesystem:
unregister_filesystem(&f2fs_fs_type);
free_shrinker:
@@ -3124,6 +3129,7 @@ fail:
static void __exit exit_f2fs_fs(void)
{
f2fs_destroy_post_read_processing();
f2fs_destroy_root_stats();
unregister_filesystem(&f2fs_fs_type);
unregister_shrinker(&f2fs_shrinker_info);


@@ -747,11 +747,12 @@ int inode_congested(struct inode *inode, int cong_bits)
*/
if (inode && inode_to_wb_is_valid(inode)) {
struct bdi_writeback *wb;
bool locked, congested;
struct wb_lock_cookie lock_cookie = {};
bool congested;
wb = unlocked_inode_to_wb_begin(inode, &locked);
wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
congested = wb_congested(wb, cong_bits);
unlocked_inode_to_wb_end(inode, locked);
unlocked_inode_to_wb_end(inode, &lock_cookie);
return congested;
}


@@ -914,7 +914,7 @@ out:
}
/*
* This is a variaon of __jbd2_update_log_tail which checks for validity of
* This is a variation of __jbd2_update_log_tail which checks for validity of
* provided log tail and locks j_checkpoint_mutex. So it is safe against races
* with other threads updating log tail.
*/
@@ -1384,6 +1384,9 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
journal_superblock_t *sb = journal->j_superblock;
int ret;
if (is_journal_aborted(journal))
return -EIO;
BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
tail_block, tail_tid);


@@ -345,7 +345,7 @@ static void jffs2_put_super (struct super_block *sb)
static void jffs2_kill_sb(struct super_block *sb)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
if (!(sb->s_flags & MS_RDONLY))
if (c && !(sb->s_flags & MS_RDONLY))
jffs2_stop_garbage_collect_thread(c);
kill_mtd_super(sb);
kfree(c);


@@ -219,9 +219,10 @@ getname_kernel(const char * filename)
if (len <= EMBEDDED_NAME_MAX) {
result->name = (char *)result->iname;
} else if (len <= PATH_MAX) {
const size_t size = offsetof(struct filename, iname[1]);
struct filename *tmp;
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
tmp = kmalloc(size, GFP_KERNEL);
if (unlikely(!tmp)) {
__putname(result);
return ERR_PTR(-ENOMEM);
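
For reference, what the new allocation size works out to; this is plain flexible-array arithmetic, assuming struct filename ends in a flexible iname[] member as the surrounding code implies:

    /* offsetof(struct filename, iname[1])
     *         == offsetof(struct filename, iname) + 1
     * i.e. the fixed part of the struct plus one byte of the trailing
     * flexible iname[] array. The old kmalloc(sizeof(*tmp)) reserved
     * no iname storage at all, since sizeof() excludes flexible array
     * members. */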


@@ -1036,7 +1036,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
goto out_free;
}
mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
mnt->mnt.mnt_flags = old->mnt.mnt_flags;
mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
/* Don't allow unprivileged users to change mount flags */
if (flag & CL_UNPRIVILEGED) {
mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;


@@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
u32 event_mask,
void *data, int data_type)
{
__u32 marks_mask, marks_ignored_mask;
__u32 marks_mask = 0, marks_ignored_mask = 0;
struct path *path = data;
pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
@@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
!d_can_lookup(path->dentry))
return false;
if (inode_mark && vfsmnt_mark) {
marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
} else if (inode_mark) {
/*
* if the event is for a child and this inode doesn't care about
* events on the child, don't send it!
*/
if ((event_mask & FS_EVENT_ON_CHILD) &&
!(inode_mark->mask & FS_EVENT_ON_CHILD))
return false;
marks_mask = inode_mark->mask;
marks_ignored_mask = inode_mark->ignored_mask;
} else if (vfsmnt_mark) {
marks_mask = vfsmnt_mark->mask;
marks_ignored_mask = vfsmnt_mark->ignored_mask;
} else {
BUG();
/*
* if the event is for a child and this inode doesn't care about
* events on the child, don't send it!
*/
if (inode_mark &&
(!(event_mask & FS_EVENT_ON_CHILD) ||
(inode_mark->mask & FS_EVENT_ON_CHILD))) {
marks_mask |= inode_mark->mask;
marks_ignored_mask |= inode_mark->ignored_mask;
}
if (vfsmnt_mark) {
marks_mask |= vfsmnt_mark->mask;
marks_ignored_mask |= vfsmnt_mark->ignored_mask;
}
if (d_is_dir(path->dentry) &&


@@ -2643,7 +2643,7 @@ static int journal_init_dev(struct super_block *super,
if (IS_ERR(journal->j_dev_bd)) {
result = PTR_ERR(journal->j_dev_bd);
journal->j_dev_bd = NULL;
reiserfs_warning(super,
reiserfs_warning(super, "sh-457",
"journal_init_dev: Cannot open '%s': %i",
jdev_name, result);
return result;


@@ -1728,8 +1728,11 @@ static void ubifs_remount_ro(struct ubifs_info *c)
dbg_save_space_info(c);
for (i = 0; i < c->jhead_cnt; i++)
ubifs_wbuf_sync(&c->jheads[i].wbuf);
for (i = 0; i < c->jhead_cnt; i++) {
err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
if (err)
ubifs_ro_mode(c, err);
}
c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
@@ -1795,8 +1798,11 @@ static void ubifs_put_super(struct super_block *sb)
int err;
/* Synchronize write-buffers */
for (i = 0; i < c->jhead_cnt; i++)
ubifs_wbuf_sync(&c->jheads[i].wbuf);
for (i = 0; i < c->jhead_cnt; i++) {
err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
if (err)
ubifs_ro_mode(c, err);
}
/*
* We are being cleanly unmounted which means the


@@ -193,6 +193,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
set_wb_congested(bdi->wb.congested, sync);
}
struct wb_lock_cookie {
bool locked;
unsigned long flags;
};
#ifdef CONFIG_CGROUP_WRITEBACK
/**


@@ -374,7 +374,7 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
/**
* unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
* @inode: target inode
* @lockedp: temp bool output param, to be passed to the end function
* @cookie: output param, to be passed to the end function
*
* The caller wants to access the wb associated with @inode but isn't
* holding inode->i_lock, mapping->tree_lock or wb->list_lock. This
@@ -382,12 +382,12 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
* association doesn't change until the transaction is finished with
* unlocked_inode_to_wb_end().
*
* The caller must call unlocked_inode_to_wb_end() with *@lockdep
* afterwards and can't sleep during transaction. IRQ may or may not be
* disabled on return.
* The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
* can't sleep during the transaction. IRQs may or may not be disabled on
* return.
*/
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
rcu_read_lock();
@@ -395,10 +395,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
* Paired with store_release in inode_switch_wb_work_fn() and
* ensures that we see the new wb if we see cleared I_WB_SWITCH.
*/
*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
if (unlikely(*lockedp))
spin_lock_irq(&inode->i_mapping->tree_lock);
if (unlikely(cookie->locked))
spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);
/*
* Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
@@ -410,12 +410,14 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
/**
* unlocked_inode_to_wb_end - end inode wb access transaction
* @inode: target inode
* @locked: *@lockedp from unlocked_inode_to_wb_begin()
* @cookie: @cookie from unlocked_inode_to_wb_begin()
*/
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
static inline void unlocked_inode_to_wb_end(struct inode *inode,
struct wb_lock_cookie *cookie)
{
if (unlikely(locked))
spin_unlock_irq(&inode->i_mapping->tree_lock);
if (unlikely(cookie->locked))
spin_unlock_irqrestore(&inode->i_mapping->tree_lock,
cookie->flags);
rcu_read_unlock();
}
@@ -462,12 +464,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
}
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
return inode_to_wb(inode);
}
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
static inline void unlocked_inode_to_wb_end(struct inode *inode,
struct wb_lock_cookie *cookie)
{
}
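
A minimal sketch of the new calling convention, mirroring the inode_congested() caller shown earlier (the function name is illustrative):

    static void example_wb_access(struct inode *inode)
    {
            struct wb_lock_cookie cookie = {};
            struct bdi_writeback *wb;

            wb = unlocked_inode_to_wb_begin(inode, &cookie);
            /* ... use wb; tree_lock may be held if I_WB_SWITCH was set ... */
            unlocked_inode_to_wb_end(inode, &cookie);
    }

The point of carrying flags in the cookie: the old bool-based API unlocked with spin_unlock_irq(), unconditionally re-enabling interrupts even for callers that entered with IRQs disabled, whereas spin_lock_irqsave()/spin_unlock_irqrestore() preserve the caller's IRQ state.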


@@ -24,6 +24,10 @@ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
}
/* crypto.c */
static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
}
static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
gfp_t gfp_flags)
{
@@ -160,10 +164,13 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
}
/* bio.c */
static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
struct bio *bio)
static inline void fscrypt_decrypt_bio(struct bio *bio)
{
}
static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
struct bio *bio)
{
return;
}
static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)


@@ -58,6 +58,7 @@ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
}
/* crypto.c */
extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
@@ -187,7 +188,9 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
}
/* bio.c */
extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
extern void fscrypt_decrypt_bio(struct bio *);
extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
struct bio *bio);
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);


@@ -793,7 +793,7 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
int hid_set_field(struct hid_field *, unsigned, __s32);
int hid_input_report(struct hid_device *, int type, u8 *, int, int);
int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
@@ -1098,13 +1098,13 @@ static inline void hid_hw_wait(struct hid_device *hdev)
*
* @report: the report we want to know the length
*/
static inline int hid_report_len(struct hid_report *report)
static inline u32 hid_report_len(struct hid_report *report)
{
/* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
return ((report->size - 1) >> 3) + 1 + (report->id > 0);
}
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt);
/* HID quirks API */
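
A worked instance of the hid_report_len() formula, with assumed values:

    /* report->size = 62 bits, report->id = 3 (assumed values):
     *         ((62 - 1) >> 3) + 1 + (3 > 0) == 7 + 1 + 1 == 9
     * 62 bits round up to 8 payload bytes, plus one byte for the
     * nonzero report ID. Returning u32 rather than int matters because
     * report->size comes from device-supplied descriptors and the
     * result feeds buffer-size calculations. */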


@@ -241,10 +241,14 @@ extern pgprot_t protection_map[16];
* ->fault function. The vma's ->fault is responsible for returning a bitmask
* of VM_FAULT_xxx flags that give details about how the fault was handled.
*
* MM layer fills up gfp_mask for page allocations but fault handler might
* alter it if its implementation requires a different allocation context.
*
* pgoff should be used in favour of virtual_address, if possible.
*/
struct vm_fault {
unsigned int flags; /* FAULT_FLAG_xxx flags */
gfp_t gfp_mask; /* gfp mask to be used for allocations */
pgoff_t pgoff; /* Logical page offset based on vma */
void __user *virtual_address; /* Faulting virtual address */
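
As a sketch of what the new comment allows, a hypothetical ->fault handler (name and policy assumed, not part of this diff) could tighten the allocation context before allocating:

    static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            /* Drop __GFP_FS so allocations for this fault cannot
             * recurse into filesystem reclaim (hypothetical policy). */
            vmf->gfp_mask &= ~__GFP_FS;

            /* ... look up or allocate the page using vmf->gfp_mask ... */
            return VM_FAULT_NOPAGE; /* illustrative return value */
    }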


@@ -342,6 +342,7 @@ struct tty_file_private {
#define TTY_PTY_LOCK 16 /* pty private */
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
#define TTY_HUPPED 18 /* Post driver->hangup() */
#define TTY_HUPPING 19 /* Hangup in progress */
#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))


@@ -127,6 +127,7 @@ typedef __u32 int32;
*/
struct cstate {
byte_t cs_this; /* connection id number (xmit) */
bool initialized; /* true if initialized */
struct cstate *next; /* next in ring (xmit) */
struct iphdr cs_ip; /* ip/tcp hdr from most recent packet */
struct tcphdr cs_tcp;


@@ -57,6 +57,7 @@ struct snd_pcm_oss_runtime {
char *buffer; /* vmallocated period */
size_t buffer_used; /* used length from period buffer */
struct mutex params_lock;
atomic_t rw_ref; /* concurrent read/write accesses */
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
struct snd_pcm_plugin *plugin_first;
struct snd_pcm_plugin *plugin_last;


@@ -198,6 +198,12 @@ static int __shm_open(struct vm_area_struct *vma)
if (IS_ERR(shp))
return PTR_ERR(shp);
if (shp->shm_file != sfd->file) {
/* ID was reused */
shm_unlock(shp);
return -EINVAL;
}
shp->shm_atim = get_seconds();
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_nattch++;
@@ -414,8 +420,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
int ret;
/*
* In case of remap_file_pages() emulation, the file can represent
* removed IPC ID: propogate shm_lock() error to caller.
* In case of remap_file_pages() emulation, the file can represent an
* IPC ID that was removed, and possibly even reused by another shm
* segment already. Propagate this case as an error to caller.
*/
ret =__shm_open(vma);
if (ret)
@@ -439,6 +446,7 @@ static int shm_release(struct inode *ino, struct file *file)
struct shm_file_data *sfd = shm_file_data(file);
put_ipc_ns(sfd->ns);
fput(sfd->file);
shm_file_data(file) = NULL;
kfree(sfd);
return 0;
@@ -1198,7 +1206,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
file->f_mapping = shp->shm_file->f_mapping;
sfd->id = shp->shm_perm.id;
sfd->ns = get_ipc_ns(ns);
sfd->file = shp->shm_file;
/*
* We need to take a reference to the real shm file to prevent the
* pointer from becoming stale in cases where the lifetime of the outer
* file extends beyond that of the shm segment. It's not usually
* possible, but it can happen during remap_file_pages() emulation as
* that unmaps the memory, then does ->mmap() via file reference only.
* We'll deny the ->mmap() if the shm segment was since removed, but to
* detect shm ID reuse we need to compare the file pointers.
*/
sfd->file = get_file(shp->shm_file);
sfd->vm_ops = NULL;
err = security_mmap_file(file, prot, flags);
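
Putting the two shm hunks above together, the ID-reuse race that the pointer comparison closes looks like this (illustrative timeline):

    /*
     * Task A: do_shmat()          sfd->file = get_file(shp->shm_file)
     * Task B: shmctl(IPC_RMID)    segment destroyed; its ID slot is
     *                             later reused by a brand-new segment
     * Task A: remap_file_pages()  emulation unmaps, then re-mmaps via
     *                             the old struct file; __shm_open()
     *                             re-resolves the ID, finds the new
     *                             segment, and bails out:
     *                                 shp->shm_file != sfd->file
     *                                 => -EINVAL
     * Without the get_file() reference, sfd->file could already be a
     * dangling pointer at that point, making the comparison unsafe.
     */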


@@ -611,7 +611,8 @@ static int __find_resource(struct resource *root, struct resource *old,
alloc.start = constraint->alignf(constraint->alignf_data, &avail,
size, constraint->align);
alloc.end = alloc.start + size - 1;
if (resource_contains(&avail, &alloc)) {
if (alloc.start <= alloc.end &&
resource_contains(&avail, &alloc)) {
new->start = alloc.start;
new->end = alloc.end;
return 0;
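
Why the extra alloc.start <= alloc.end test is needed: the end computation can wrap. An illustrative case, assuming a 32-bit resource_size_t:

    struct resource avail = { .start = 0, .end = 0xffffffffUL };
    struct resource alloc;

    alloc.start = 0xfffff000UL;             /* from constraint->alignf() */
    alloc.end   = alloc.start + 0x2000 - 1; /* wraps to 0x00000fff */

    /* resource_contains(&avail, &alloc) checks only the endpoints, so
     * the wrapped range would still appear to "fit"; the new
     * alloc.start <= alloc.end test rejects it first. */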


@@ -435,6 +435,7 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
debug_object_free(timer, &hrtimer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }


@@ -573,7 +573,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
VM_BUG_ON_PAGE(!PageLocked(new), new);
VM_BUG_ON_PAGE(new->mapping, new);
error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
if (!error) {
struct address_space *mapping = old->mapping;
void (*freepage)(struct page *);
@@ -632,7 +632,7 @@ static int __add_to_page_cache_locked(struct page *page,
return error;
}
error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
if (error) {
if (!huge)
mem_cgroup_cancel_charge(page, memcg);
@@ -1194,8 +1194,7 @@ no_page:
if (fgp_flags & FGP_ACCESSED)
__SetPageReferenced(page);
err = add_to_page_cache_lru(page, mapping, offset,
gfp_mask & GFP_RECLAIM_MASK);
err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
if (unlikely(err)) {
page_cache_release(page);
page = NULL;
@@ -1829,19 +1828,18 @@ EXPORT_SYMBOL(generic_file_read_iter);
* This adds the requested page to the page cache if it isn't already there,
* and schedules an I/O to read in its contents from disk.
*/
static int page_cache_read(struct file *file, pgoff_t offset)
static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
{
struct address_space *mapping = file->f_mapping;
struct page *page;
int ret;
do {
page = page_cache_alloc_cold(mapping);
page = __page_cache_alloc(gfp_mask|__GFP_COLD);
if (!page)
return -ENOMEM;
ret = add_to_page_cache_lru(page, mapping, offset,
mapping_gfp_constraint(mapping, GFP_KERNEL));
ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
if (ret == 0)
ret = mapping->a_ops->readpage(file, page);
else if (ret == -EEXIST)
@@ -2022,7 +2020,7 @@ no_cached_page:
* We're only likely to ever get here if MADV_RANDOM is in
* effect.
*/
error = page_cache_read(file, offset);
error = page_cache_read(file, offset, vmf->gfp_mask);
/*
* The page we want has now been added to the page cache.

Some files were not shown because too many files have changed in this diff.