This is the 4.4.129 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlre3XwACgkQONu9yGCS
aT5KcRAAxB6w9SbjjlGv+PsN3ISQgnIPjWadBQ12WWnpr1sqZi0wrMZRsNiK5+UN
wPalUBiLiAIqNoDVSrDUgjyqC+wnQjhM/9tudEBqXQ6TQbSHQfQpZHQabLEtXxCP
Yd1EHwEgJrCHqaj17oFZFkps20ooKtSnYQ57pyZNem5EPR/ayaMWvo6WM7k6d2hD
E2WE57ShLbvslYaSvmDXML6o9f/bBKHOuL0GymVtDEUcyTLuw3GZaplnuaSLz6kc
o7tU2xVV+yajmpiEt4iR40Pgk+pygEGC14OI8dj/YHVotDzJKWnMgQ/HKxr8kyra
ImQPwu9DmaWqAUGr2SRmE/SXJpKdeYM1rxA/H3pMSaP9nRc2ccHyQF/ASGfHs+Mv
9hNQBjRugS4UXDzFhRlEh97CyfVa/ZuF0WgiBtBYnXSdXKA1xDq9cVf3UJg7k6om
1X7HLEVLhVLR7/liPjhOlTj9vrUzc6NcN+uVdfnmspI1BjTBe3ezzLqEP8VTUsNQ
p/V9r0i6TGR3gYQuTzjU/MaAuBZwj1D5sCnVUphCNUtSJf/0cjQsfYUcgtrtk67U
9Bjlo0pWHpAXxARiegBY3n5ClkZpdqEnt4Dp2MdR65pTSJ4MfC2UDLemUgB18arU
IllNzG2GywgQSouH3s5XPNZLkEvX8iK5lUWqRQ7ZiaA/0jVkn70=
=K6Qy
-----END PGP SIGNATURE-----

Merge 4.4.129 into android-4.4

Changes in 4.4.129
    media: v4l2-compat-ioctl32: don't oops on overlay
    parisc: Fix out of array access in match_pci_device()
    perf intel-pt: Fix overlap detection to identify consecutive buffers correctly
    perf intel-pt: Fix sync_switch
    perf intel-pt: Fix error recovery from missing TIP packet
    perf intel-pt: Fix timestamp following overflow
    radeon: hide pointless #warning when compile testing
    Revert "perf tests: Decompress kernel module before objdump"
    block/loop: fix deadlock after loop_set_status
    s390/qdio: don't retry EQBS after CCQ 96
    s390/qdio: don't merge ERROR output buffers
    s390/ipl: ensure loadparm valid flag is set
    getname_kernel() needs to make sure that ->name != ->iname in long case
    rtl8187: Fix NULL pointer dereference in priv->conf_mutex
    hwmon: (ina2xx) Fix access to uninitialized mutex
    cdc_ether: flag the Cinterion AHS8 modem by gemalto as WWAN
    slip: Check if rstate is initialized before uncompressing
    lan78xx: Correctly indicate invalid OTP
    x86/hweight: Get rid of the special calling convention
    x86/hweight: Don't clobber %rdi
    tty: make n_tty_read() always abort if hangup is in progress
    ubifs: Check ubifs_wbuf_sync() return code
    ubi: fastmap: Don't flush fastmap work on detach
    ubi: Fix error for write access
    ubi: Reject MLC NAND
    fs/reiserfs/journal.c: add missing resierfs_warning() arg
    resource: fix integer overflow at reallocation
    ipc/shm: fix use-after-free of shm file via remap_file_pages()
    mm, slab: reschedule cache_reap() on the same CPU
    usb: musb: gadget: misplaced out of bounds check
    ARM: dts: at91: at91sam9g25: fix mux-mask pinctrl property
    ARM: dts: at91: sama5d4: fix pinctrl compatible string
    xen-netfront: Fix hang on device removal
    regmap: Fix reversed bounds check in regmap_raw_write()
    ACPI / video: Add quirk to force acpi-video backlight on Samsung 670Z5E
    ACPI / hotplug / PCI: Check presence of slot itself in get_slot_status()
    USB: fix USB3 devices behind USB3 hubs not resuming at hibernate thaw
    usb: dwc3: pci: Properly cleanup resource
    HID: i2c-hid: fix size check and type usage
    powerpc/powernv: Handle unknown OPAL errors in opal_nvram_write()
    powerpc/64: Fix smp_wmb barrier definition use use lwsync consistently
    powerpc/powernv: define a standard delay for OPAL_BUSY type retry loops
    powerpc/powernv: Fix OPAL NVRAM driver OPAL_BUSY loops
    HID: Fix hid_report_len usage
    HID: core: Fix size as type u32
    ASoC: ssm2602: Replace reg_default_raw with reg_default
    thunderbolt: Resume control channel after hibernation image is created
    random: use a tighter cap in credit_entropy_bits_safe()
    jbd2: if the journal is aborted then don't allow update of the log tail
    ext4: don't update checksum of new initialized bitmaps
    ext4: fail ext4_iget for root directory if unallocated
    RDMA/ucma: Don't allow setting RDMA_OPTION_IB_PATH without an RDMA device
    ALSA: pcm: Fix UAF at PCM release via PCM timer access
    IB/srp: Fix srp_abort()
    IB/srp: Fix completion vector assignment algorithm
    dmaengine: at_xdmac: fix rare residue corruption
    um: Use POSIX ucontext_t instead of struct ucontext
    iommu/vt-d: Fix a potential memory leak
    mmc: jz4740: Fix race condition in IRQ mask update
    clk: mvebu: armada-38x: add support for 1866MHz variants
    clk: mvebu: armada-38x: add support for missing clocks
    clk: bcm2835: De-assert/assert PLL reset signal when appropriate
    thermal: imx: Fix race condition in imx_thermal_probe()
    watchdog: f71808e_wdt: Fix WD_EN register read
    ALSA: oss: consolidate kmalloc/memset 0 call to kzalloc
    ALSA: pcm: Use ERESTARTSYS instead of EINTR in OSS emulation
    ALSA: pcm: Avoid potential races between OSS ioctls and read/write
    ALSA: pcm: Return -EBUSY for OSS ioctls changing busy streams
    ALSA: pcm: Fix mutex unbalance in OSS emulation ioctls
    ALSA: pcm: Fix endless loop for XRUN recovery in OSS emulation
    vfio-pci: Virtualize PCIe & AF FLR
    vfio/pci: Virtualize Maximum Payload Size
    vfio/pci: Virtualize Maximum Read Request Size
    ext4: don't allow r/w mounts if metadata blocks overlap the superblock
    drm/radeon: Fix PCIe lane width calculation
    ext4: fix crashes in dioread_nolock mode
    ext4: fix deadlock between inline_data and ext4_expand_extra_isize_ea()
    ALSA: line6: Use correct endpoint type for midi output
    ALSA: rawmidi: Fix missing input substream checks in compat ioctls
    ALSA: hda - New VIA controller suppor no-snoop path
    HID: hidraw: Fix crash on HIDIOCGFEATURE with a destroyed device
    MIPS: uaccess: Add micromips clobbers to bzero invocation
    MIPS: memset.S: EVA & fault support for small_memset
    MIPS: memset.S: Fix return of __clear_user from Lpartial_fixup
    MIPS: memset.S: Fix clobber of v1 in last_fixup
    powerpc/eeh: Fix enabling bridge MMIO windows
    powerpc/lib: Fix off-by-one in alternate feature patching
    jffs2_kill_sb(): deal with failed allocations
    hypfs_kill_super(): deal with failed allocations
    rpc_pipefs: fix double-dput()
    Don't leak MNT_INTERNAL away from internal mounts
    autofs: mount point create should honour passed in mode
    mm: allow GFP_{FS,IO} for page_cache_read page cache allocation
    mm/filemap.c: fix NULL pointer in page_cache_tree_insert()
    ext4: bugfix for mmaped pages in mpage_release_unused_pages()
    fanotify: fix logic of events on child
    writeback: safer lock nesting
    Linux 4.4.129

Change-Id: I8806d2cc92fe512f27a349e8f630ced0cac9a8d7
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit b1c4836e57

96 changed files with 826 additions and 425 deletions

Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 128
+SUBLEVEL = 129
 EXTRAVERSION =
 NAME = Blurry Fish Butt
@@ -21,7 +21,7 @@
			atmel,mux-mask = <
			      /*    A         B          C     */
			       0xffffffff 0xffe0399f 0xc000001c  /* pioA */
-			       0x0007ffff 0x8000fe3f 0x00000000  /* pioB */
+			       0x0007ffff 0x00047e3f 0x00000000  /* pioB */
			       0x80000000 0x07c0ffff 0xb83fffff  /* pioC */
			       0x003fffff 0x003f8000 0x00000000  /* pioD */
			       >;
@@ -1354,7 +1354,7 @@
			pinctrl@fc06a000 {
				#address-cells = <1>;
				#size-cells = <1>;
-				compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
+				compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus";
				ranges = <0xfc068000 0xfc068000 0x100
					0xfc06a000 0xfc06a000 0x4000>;
				/* WARNING: revisit as pin spec has changed */
@@ -1238,6 +1238,13 @@ __clear_user(void __user *addr, __kernel_size_t size)
 {
	__kernel_size_t res;
 
+#ifdef CONFIG_CPU_MICROMIPS
+/* micromips memset / bzero also clobbers t7 & t8 */
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
+#else
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
+#endif /* CONFIG_CPU_MICROMIPS */
+
	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"

@@ -1247,7 +1254,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
-			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+			: bzero_clobbers);
	} else {
		might_fault();
		__asm__ __volatile__(

@@ -1258,7 +1265,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
-			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+			: bzero_clobbers);
	}
 
	return res;
@@ -218,7 +218,7 @@
 1:	PTR_ADDIU	a0, 1			/* fill bytewise */
	R10KCBARRIER(0(ra))
	bne		t1, a0, 1b
-	sb		a1, -1(a0)
+	 EX(sb, a1, -1(a0), .Lsmall_fixup\@)
 
 2:	jr		ra			/* done */
	move		a2, zero

@@ -249,13 +249,18 @@
	PTR_L		t0, TI_TASK($28)
	andi		a2, STORMASK
	LONG_L		t0, THREAD_BUADDR(t0)
-	LONG_ADDU	a2, t1
+	LONG_ADDU	a2, a0
	jr		ra
	LONG_SUBU	a2, t0
 
 .Llast_fixup\@:
	jr		ra
-	andi		v1, a2, STORMASK
+	 nop
+
+.Lsmall_fixup\@:
+	PTR_SUBU	a2, t1, a0
+	jr		ra
+	 PTR_ADDIU	a2, 1
 
	.endm
@@ -648,6 +648,10 @@ static int match_pci_device(struct device *dev, int index,
					(modpath->mod == PCI_FUNC(devfn)));
	}
 
+	/* index might be out of bounds for bc[] */
+	if (index >= 6)
+		return 0;
+
	id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
	return (modpath->bc[index] == id);
 }
@@ -36,7 +36,8 @@
 
 #define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
 
-#ifdef __SUBARCH_HAS_LWSYNC
+/* The sub-arch has lwsync */
+#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
 #    define SMPWMB      LWSYNC
 #else
 #    define SMPWMB      eieio
@@ -21,6 +21,9 @@
 /* We calculate number of sg entries based on PAGE_SIZE */
 #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
 
+/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
+#define OPAL_BUSY_DELAY_MS	10
+
 /* /sys/firmware/opal */
 extern struct kobject *opal_kobj;
 
@@ -5,10 +5,6 @@
 #include <linux/stringify.h>
 #include <asm/feature-fixups.h>
 
-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
-#define __SUBARCH_HAS_LWSYNC
-#endif
-
 #ifndef __ASSEMBLY__
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
 extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
@@ -788,7 +788,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
		eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
 
	/* PCI Command: 0x4 */
-	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
+	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
+			      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
 
	/* Check the PCIe link is ready */
	eeh_bridge_check_link(edev);
@@ -53,7 +53,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
	unsigned int *target = (unsigned int *)branch_target(src);
 
	/* Branch within the section doesn't need translating */
-	if (target < alt_start || target >= alt_end) {
+	if (target < alt_start || target > alt_end) {
		instr = translate_branch(dest, src);
		if (!instr)
			return 1;
@@ -11,6 +11,7 @@
 
 #define DEBUG
 
+#include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/of.h>

@@ -56,9 +57,17 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
 
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_write_nvram(__pa(buf), count, off);
-		if (rc == OPAL_BUSY_EVENT)
+		if (rc == OPAL_BUSY_EVENT) {
+			msleep(OPAL_BUSY_DELAY_MS);
			opal_poll_events(NULL);
+		} else if (rc == OPAL_BUSY) {
+			msleep(OPAL_BUSY_DELAY_MS);
+		}
	}
 
+	if (rc)
+		return -EIO;
+
	*index += count;
	return count;
 }
@@ -318,7 +318,7 @@ static void hypfs_kill_super(struct super_block *sb)
 
	if (sb->s_root)
		hypfs_delete_tree(sb->s_root);
-	if (sb_info->update_file)
+	if (sb_info && sb_info->update_file)
		hypfs_remove(sb_info->update_file);
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
@@ -798,6 +798,7 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
	/* copy and convert to ebcdic */
	memcpy(ipb->hdr.loadparm, buf, lp_len);
	ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
+	ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
	return len;
 }
 
@@ -140,7 +140,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
 
 static void hard_handler(int sig, siginfo_t *si, void *p)
 {
-	struct ucontext *uc = p;
+	ucontext_t *uc = p;
	mcontext_t *mc = &uc->uc_mcontext;
	unsigned long pending = 1UL << sig;
 
@@ -10,7 +10,7 @@
 void __attribute__ ((__section__ (".__syscall_stub")))
 stub_segv_handler(int sig, siginfo_t *info, void *p)
 {
-	struct ucontext *uc = p;
+	ucontext_t *uc = p;
 
	GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
			      &uc->uc_mcontext);
@@ -205,6 +205,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
				"3570R/370R/470R/450R/510R/4450RV"),
		},
	},
+	{
+	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */
+	 .callback = video_detect_force_video,
+	 .ident = "SAMSUNG 670Z5E",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"),
+		},
+	},
	{
	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
	 .callback = video_detect_force_video,
@@ -1582,7 +1582,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
-	if (map->max_raw_write && map->max_raw_write > val_len)
+	if (map->max_raw_write && map->max_raw_write < val_len)
		return -E2BIG;
 
	map->lock(map->lock_arg);
@@ -1121,11 +1121,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
	if (info->lo_encrypt_type) {
		unsigned int type = info->lo_encrypt_type;
 
-		if (type >= MAX_LO_CRYPT)
-			return -EINVAL;
+		if (type >= MAX_LO_CRYPT) {
+			err = -EINVAL;
+			goto exit;
+		}
		xfer = xfer_funcs[type];
-		if (xfer == NULL)
-			return -EINVAL;
+		if (xfer == NULL) {
+			err = -EINVAL;
+			goto exit;
+		}
	} else
		xfer = NULL;
 
@@ -724,7 +724,7 @@ retry:
 
 static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
 {
-	const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
+	const int nbits_max = r->poolinfo->poolwords * 32;
 
	if (nbits < 0)
		return -EINVAL;
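The hunk above replaces a cap derived from the fixed-point credit representation with one tied to the size of the pool the caller passes in. A minimal standalone sketch of the arithmetic; ENTROPY_SHIFT = 3 and the 128-word input pool size are assumptions taken from the 4.4 random.c sources, not from this diff:

#include <stdio.h>

#define ENTROPY_SHIFT 3      /* assumed: fixed-point shift used by random.c */
#define POOLWORDS     128    /* assumed: input pool size in 32-bit words */

int main(void)
{
	/* old cap: largest value representable in the fixed-point credit */
	int old_cap = (int)(~0U >> (ENTROPY_SHIFT + 1));
	/* new cap: the number of bits the pool can actually hold */
	int new_cap = POOLWORDS * 32;

	printf("old cap: %d bits\n", old_cap);	/* 268435455 */
	printf("new cap: %d bits\n", new_cap);	/* 4096 */
	return 0;
}

The point of the change is visible in the two printed values: the old bound was far beyond anything a real pool can store, so it never constrained a bogus credit.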
@@ -891,9 +891,7 @@ static void bcm2835_pll_off(struct clk_hw *hw)
	const struct bcm2835_pll_data *data = pll->data;
 
	spin_lock(&cprman->regs_lock);
-	cprman_write(cprman, data->cm_ctrl_reg,
-		     cprman_read(cprman, data->cm_ctrl_reg) |
-		     CM_PLL_ANARST);
+	cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
	cprman_write(cprman, data->a2w_ctrl_reg,
		     cprman_read(cprman, data->a2w_ctrl_reg) |
		     A2W_PLL_CTRL_PWRDN);

@@ -929,6 +927,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
		cpu_relax();
	}
 
+	cprman_write(cprman, data->a2w_ctrl_reg,
+		     cprman_read(cprman, data->a2w_ctrl_reg) |
+		     A2W_PLL_CTRL_PRST_DISABLE);
+
	return 0;
 }
 
@@ -46,10 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
 }
 
 static const u32 armada_38x_cpu_frequencies[] __initconst = {
	0, 0, 0, 0,
	1066 * 1000 * 1000, 0, 0, 0,
	666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0,
	1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
	1332 * 1000 * 1000, 0, 0, 0,
-	1600 * 1000 * 1000,
+	1600 * 1000 * 1000, 0, 0, 0,
+	1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
 };
 
 static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)

@@ -75,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
 };
 
 static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
	{0, 1}, {0, 1}, {0, 1}, {0, 1},
	{1, 2}, {0, 1}, {1, 2}, {0, 1},
	{1, 2}, {0, 1}, {1, 2}, {0, 1},
	{1, 2}, {0, 1}, {0, 1}, {0, 1},
	{1, 2}, {0, 1}, {0, 1}, {0, 1},
	{1, 2}, {0, 1}, {0, 1}, {0, 1},
	{0, 1}, {0, 1}, {0, 1}, {0, 1},
	{1, 2}, {0, 1}, {0, 1}, {1, 2},
	{0, 1}, {0, 1}, {0, 1}, {0, 1},
	{0, 1}, {0, 1}, {0, 1}, {0, 1},
	{0, 1}, {0, 1}, {0, 1}, {0, 1},

@@ -90,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
	{1, 2}, {0, 1}, {0, 1}, {0, 1},
	{1, 2}, {0, 1}, {0, 1}, {0, 1},
	{1, 2}, {0, 1}, {0, 1}, {0, 1},
-	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{1, 2}, {0, 1}, {0, 1}, {7, 15},
	{0, 1}, {0, 1}, {0, 1}, {0, 1},
	{0, 1}, {0, 1}, {0, 1}, {0, 1},
	{0, 1}, {0, 1}, {0, 1}, {0, 1},
@@ -1473,10 +1473,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();
-		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
-		rmb();
		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
		rmb();
+		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+		rmb();
		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();
 
@@ -238,9 +238,10 @@ int radeon_bo_create(struct radeon_device *rdev,
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */
-
+#ifndef CONFIG_COMPILE_TEST
 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
+#endif
 
	if (bo->flags & RADEON_GEM_GTT_WC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
@@ -5964,9 +5964,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
 {
	u32 lane_width;
	u32 new_lane_width =
-		(radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+		((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
	u32 current_lane_width =
-		(radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+		((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
 
	if (new_lane_width != current_lane_width) {
		radeon_set_pcie_lanes(rdev, new_lane_width);
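The trailing "+ 1" in the hunk above suggests the ATOM_PPLIB field stores the lane count minus one, so the unfixed decode reported a x16 link as 15 lanes. A minimal sketch of that decode; the mask and shift values here are illustrative assumptions, not copied from the radeon headers:

#include <stdio.h>
#include <stdint.h>

#define LINK_WIDTH_MASK  0x00F0u	/* assumed field layout, for illustration only */
#define LINK_WIDTH_SHIFT 4

static uint32_t decode_lane_width(uint32_t caps)
{
	/* the field encodes (lanes - 1), hence the trailing +1 */
	return ((caps & LINK_WIDTH_MASK) >> LINK_WIDTH_SHIFT) + 1;
}

int main(void)
{
	uint32_t caps = 15u << LINK_WIDTH_SHIFT;	/* field value for a x16 link */
	printf("lanes = %u\n", decode_lane_width(caps));	/* prints 16 */
	return 0;
}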
@@ -1331,7 +1331,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
	 * of implement() working on 8 byte chunks
	 */
 
-	int len = hid_report_len(report) + 7;
+	u32 len = hid_report_len(report) + 7;
 
	return kmalloc(len, flags);
 }

@@ -1396,7 +1396,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
 {
	char *buf;
	int ret;
-	int len;
+	u32 len;
 
	buf = hid_alloc_report_buf(report, GFP_KERNEL);
	if (!buf)

@@ -1422,14 +1422,14 @@ out:
 }
 EXPORT_SYMBOL_GPL(__hid_request);
 
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
			 int interrupt)
 {
	struct hid_report_enum *report_enum = hid->report_enum + type;
	struct hid_report *report;
	struct hid_driver *hdrv;
	unsigned int a;
-	int rsize, csize = size;
+	u32 rsize, csize = size;
	u8 *cdata = data;
	int ret = 0;
 

@@ -1487,7 +1487,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
 *
 * This is data entry for lower layers.
 */
-int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
+int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
 {
	struct hid_report_enum *report_enum;
	struct hid_driver *hdrv;
@@ -1258,7 +1258,8 @@ static void hidinput_led_worker(struct work_struct *work)
							led_work);
	struct hid_field *field;
	struct hid_report *report;
-	int len, ret;
+	int ret;
+	u32 len;
	__u8 *buf;
 
	field = hidinput_get_led_field(hid);
@@ -314,7 +314,8 @@ static struct attribute_group mt_attribute_group = {
 static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
 {
	struct mt_device *td = hid_get_drvdata(hdev);
-	int ret, size = hid_report_len(report);
+	int ret;
+	u32 size = hid_report_len(report);
	u8 *buf;
 
	/*

@@ -919,7 +920,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
	struct hid_report_enum *re;
	struct mt_class *cls = &td->mtclass;
	char *buf;
-	int report_len;
+	u32 report_len;
 
	if (td->inputmode < 0)
		return;
@@ -110,8 +110,8 @@ struct rmi_data {
	u8 *writeReport;
	u8 *readReport;
 
-	int input_report_size;
-	int output_report_size;
+	u32 input_report_size;
+	u32 output_report_size;
 
	unsigned long flags;
 
@@ -197,6 +197,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
	int ret = 0, len;
	unsigned char report_number;
 
+	if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+		ret = -ENODEV;
+		goto out;
+	}
+
	dev = hidraw_table[minor]->hid;
 
	if (!dev->ll_driver->raw_request) {
@@ -137,10 +137,10 @@ struct i2c_hid {
						   * register of the HID
						   * descriptor. */
	unsigned int		bufsize;	/* i2c buffer size */
-	char			*inbuf;		/* Input buffer */
-	char			*rawbuf;	/* Raw Input buffer */
-	char			*cmdbuf;	/* Command buffer */
-	char			*argsbuf;	/* Command arguments buffer */
+	u8			*inbuf;		/* Input buffer */
+	u8			*rawbuf;	/* Raw Input buffer */
+	u8			*cmdbuf;	/* Command buffer */
+	u8			*argsbuf;	/* Command arguments buffer */
 
	unsigned long		flags;		/* device flags */
 

@@ -387,7 +387,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
 
 static void i2c_hid_get_input(struct i2c_hid *ihid)
 {
-	int ret, ret_size;
+	int ret;
+	u32 ret_size;
	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
 
	if (size > ihid->bufsize)

@@ -412,7 +413,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
		return;
	}
 
-	if (ret_size > size) {
+	if ((ret_size > size) || (ret_size <= 2)) {
		dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
			__func__, size, ret_size);
		return;
@@ -447,6 +447,7 @@ static int ina2xx_probe(struct i2c_client *client,
 
	/* set the device type */
	data->config = &ina2xx_config[id->driver_data];
+	mutex_init(&data->config_lock);
 
	if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
		struct ina2xx_platform_data *pdata = dev_get_platdata(dev);

@@ -473,8 +474,6 @@ static int ina2xx_probe(struct i2c_client *client,
		return -ENODEV;
	}
 
-	mutex_init(&data->config_lock);
-
	data->groups[group++] = &ina2xx_group;
	if (id->driver_data == ina226)
		data->groups[group++] = &ina226_group;
@@ -1230,6 +1230,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
	if (!optlen)
		return -EINVAL;
 
+	if (!ctx->cm_id->device)
+		return -EINVAL;
+
	memset(&sa_path, 0, sizeof(sa_path));
 
	ib_sa_unpack_path(path_data->path_rec, &sa_path);
@@ -2581,9 +2581,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
-	srp_free_req(ch, req, scmnd, 0);
-	scmnd->result = DID_ABORT << 16;
-	scmnd->scsi_done(scmnd);
+	if (ret == SUCCESS) {
+		srp_free_req(ch, req, scmnd, 0);
+		scmnd->result = DID_ABORT << 16;
+		scmnd->scsi_done(scmnd);
+	}
 
	return ret;
 }

@@ -3309,12 +3311,10 @@ static ssize_t srp_create_target(struct device *dev,
					      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
-		const int cv_start = (node_idx * ibdev->num_comp_vectors /
-				      num_online_nodes() + target->comp_vector)
-				     % ibdev->num_comp_vectors;
-		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
-				    num_online_nodes() + target->comp_vector)
-				   % ibdev->num_comp_vectors;
+		const int cv_start = node_idx * ibdev->num_comp_vectors /
+				     num_online_nodes();
+		const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
+				   num_online_nodes();
		int cpu_idx = 0;
 
		for_each_online_cpu(cpu) {
@@ -389,6 +389,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
					 pasid_max - 1, GFP_KERNEL);
		if (ret < 0) {
			kfree(svm);
+			kfree(sdev);
			goto out;
		}
		svm->pasid = ret;
@@ -101,7 +101,7 @@ static int get_v4l2_window32(struct v4l2_window __user *kp,
 static int put_v4l2_window32(struct v4l2_window __user *kp,
			     struct v4l2_window32 __user *up)
 {
-	struct v4l2_clip __user *kclips = kp->clips;
+	struct v4l2_clip __user *kclips;
	struct v4l2_clip32 __user *uclips;
	compat_caddr_t p;
	u32 clipcount;

@@ -116,6 +116,8 @@ static int put_v4l2_window32(struct v4l2_window __user *kp,
	if (!clipcount)
		return 0;
 
+	if (get_user(kclips, &kp->clips))
+		return -EFAULT;
	if (get_user(p, &up->clips))
		return -EFAULT;
	uclips = compat_ptr(p);
@@ -368,9 +368,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
		host->irq_mask &= ~irq;
	else
		host->irq_mask |= irq;
-	spin_unlock_irqrestore(&host->lock, flags);
 
	writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
+	spin_unlock_irqrestore(&host->lock, flags);
 }
 
 static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
@@ -244,7 +244,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
	 * in any case.
	 */
	if (mode & FMODE_WRITE) {
-		ret = -EPERM;
+		ret = -EROFS;
		goto out_unlock;
	}
 
@@ -889,6 +889,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		return -EINVAL;
	}
 
+	/*
+	 * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
+	 * MLC NAND is different and needs special care, otherwise UBI or UBIFS
+	 * will die soon and you will lose all your data.
+	 */
+	if (mtd->type == MTD_MLCNANDFLASH) {
+		pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
+		       mtd->index);
+		return -EINVAL;
+	}
+
	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
@@ -360,7 +360,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
 {
	int i;
 
-	flush_work(&ubi->fm_work);
	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
 
@@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
		if(x < 0 || x > comp->rslot_limit)
			goto bad;
 
+		/* Check if the cstate is initialized */
+		if (!comp->rstate[x].initialized)
+			goto bad;
+
		comp->flags &=~ SLF_TOSS;
		comp->recv_current = x;
	} else {

@@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
	if (cs->cs_tcp.doff > 5)
		memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
	cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
+	cs->initialized = true;
	/* Put headers back on packet
	 * Neither header checksum is recalculated
	 */
@@ -704,6 +704,12 @@ static const struct usb_device_id products[] = {
				      USB_CDC_SUBCLASS_ETHERNET,
				      USB_CDC_PROTO_NONE),
	.driver_info = (unsigned long)&wwan_info,
+}, {
+	/* Cinterion AHS3 modem by GEMALTO */
+	USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long)&wwan_info,
 }, {
	/* Telit modules */
	USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
@@ -618,6 +618,7 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			offset += 0x100;
		else
			ret = -EINVAL;
-		ret = lan78xx_read_raw_otp(dev, offset, length, data);
+		if (!ret)
+			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}
 
@@ -1454,6 +1454,7 @@ static int rtl8187_probe(struct usb_interface *intf,
		goto err_free_dev;
	}
	mutex_init(&priv->io_mutex);
+	mutex_init(&priv->conf_mutex);
 
	SET_IEEE80211_DEV(dev, &intf->dev);
	usb_set_intfdata(intf, dev);

@@ -1627,7 +1628,6 @@ static int rtl8187_probe(struct usb_interface *intf,
		printk(KERN_ERR "rtl8187: Cannot register device\n");
		goto err_free_dmabuf;
	}
-	mutex_init(&priv->conf_mutex);
	skb_queue_head_init(&priv->b_tx_status.queue);
 
	wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
@@ -2024,7 +2024,10 @@ static void netback_changed(struct xenbus_device *dev,
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
+		break;
+
	case XenbusStateUnknown:
+		wake_up_all(&module_unload_q);
		break;
 
	case XenbusStateInitWait:

@@ -2155,7 +2158,9 @@ static int xennet_remove(struct xenbus_device *dev)
		xenbus_switch_state(dev, XenbusStateClosing);
		wait_event(module_unload_q,
			   xenbus_read_driver_state(dev->otherend) ==
-			   XenbusStateClosing);
+			   XenbusStateClosing ||
+			   xenbus_read_driver_state(dev->otherend) ==
+			   XenbusStateUnknown);
 
		xenbus_switch_state(dev, XenbusStateClosed);
		wait_event(module_unload_q,
@@ -587,6 +587,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
 {
	unsigned long long sta = 0;
	struct acpiphp_func *func;
+	u32 dvid;
 
	list_for_each_entry(func, &slot->funcs, sibling) {
		if (func->flags & FUNC_HAS_STA) {

@@ -597,19 +598,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
			if (ACPI_SUCCESS(status) && sta)
				break;
		} else {
-			u32 dvid;
-
-			pci_bus_read_config_dword(slot->bus,
-						  PCI_DEVFN(slot->device,
-							    func->function),
-						  PCI_VENDOR_ID, &dvid);
-			if (dvid != 0xffffffff) {
+			if (pci_bus_read_dev_vendor_id(slot->bus,
+					PCI_DEVFN(slot->device, func->function),
+					&dvid, 0)) {
				sta = ACPI_STA_ALL;
				break;
			}
		}
	}
 
+	if (!sta) {
+		/*
+		 * Check for the slot itself since it may be that the
+		 * ACPI slot is a device below PCIe upstream port so in
+		 * that case it may not even be reachable yet.
+		 */
+		if (pci_bus_read_dev_vendor_id(slot->bus,
+				PCI_DEVFN(slot->device, 0), &dvid, 0)) {
+			sta = ACPI_STA_ALL;
+		}
+	}
+
	return (unsigned int)sta;
 }
 
@@ -126,7 +126,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
 {
-	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
+	int rc, tmp_count = count, tmp_start = start, nr = q->nr;
	unsigned int ccq = 0;
 
	qperf_inc(q, eqbs);

@@ -149,13 +149,6 @@ again:
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
-		/*
-		 * Retry once, if that fails bail out and process the
-		 * extracted buffers before trying again.
-		 */
-		if (!retried++)
-			goto again;
-		else
-			return count - tmp_count;
+		return count - tmp_count;
	}
 

@@ -212,7 +205,10 @@ again:
	return 0;
 }
 
-/* returns number of examined buffers and their common state in *state */
+/*
+ * Returns number of examined buffers and their common state in *state.
+ * Requested number of buffers-to-examine must be > 0.
+ */
 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)

@@ -223,17 +219,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
 
-	for (i = 0; i < count; i++) {
-		if (!__state) {
-			__state = q->slsb.val[bufnr];
-			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
-				__state = SLSB_P_OUTPUT_EMPTY;
-		} else if (merge_pending) {
-			if ((q->slsb.val[bufnr] & __state) != __state)
-				break;
-		} else if (q->slsb.val[bufnr] != __state)
-			break;
+	/* get initial state: */
+	__state = q->slsb.val[bufnr];
+	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+		__state = SLSB_P_OUTPUT_EMPTY;
+
+	for (i = 1; i < count; i++) {
		bufnr = next_buf(bufnr);
+
+		/* merge PENDING into EMPTY: */
+		if (merge_pending &&
+		    q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
+		    __state == SLSB_P_OUTPUT_EMPTY)
+			continue;
+
+		/* stop if next state differs from initial state: */
+		if (q->slsb.val[bufnr] != __state)
+			break;
	}
	*state = __state;
	return i;
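The rewritten get_buf_states() loop above reads the first SLSB state once, optionally folds PENDING into EMPTY, and then only keeps scanning while later buffers match that initial state, so ERROR buffers no longer get merged in. A minimal model of that merge semantics; the state values are illustrative stand-ins, not the real SLSB encoding:

#include <stdio.h>

enum state { EMPTY, PENDING, ERROR };

/* counts leading buffers that share one state, merging PENDING into EMPTY */
static int count_same_state(const enum state *slsb, int count, int merge_pending)
{
	enum state first = slsb[0];
	int i;

	if (merge_pending && first == PENDING)
		first = EMPTY;

	for (i = 1; i < count; i++) {
		if (merge_pending && slsb[i] == PENDING && first == EMPTY)
			continue;	/* PENDING still merges into EMPTY */
		if (slsb[i] != first)
			break;		/* ERROR no longer merges with anything */
	}
	return i;
}

int main(void)
{
	enum state q[] = { EMPTY, PENDING, ERROR, EMPTY };
	printf("examined: %d\n", count_same_state(q, 4, 1));	/* prints 2 */
	return 0;
}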
@@ -589,6 +589,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
	regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
	regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
 
+	data->irq_enabled = true;
+	data->mode = THERMAL_DEVICE_ENABLED;
+
	ret = devm_request_threaded_irq(&pdev->dev, data->irq,
			imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
			0, "imx_thermal", data);

@@ -600,9 +603,6 @@ static int imx_thermal_probe(struct platform_device *pdev)
		return ret;
	}
 
-	data->irq_enabled = true;
-	data->mode = THERMAL_DEVICE_ENABLED;
-
	return 0;
 }
 
@@ -627,6 +627,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
+	.thaw_noirq = nhi_resume_noirq,
	.restore_noirq = nhi_resume_noirq,
 };
 
@@ -2238,6 +2238,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
		}
		if (tty_hung_up_p(file))
			break;
+		/*
+		 * Abort readers for ttys which never actually
+		 * get hung up. See __tty_hangup().
+		 */
+		if (test_bit(TTY_HUPPING, &tty->flags))
+			break;
		if (!timeout)
			break;
		if (file->f_flags & O_NONBLOCK) {
@@ -702,6 +702,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
		return;
	}
 
+	/*
+	 * Some console devices aren't actually hung up for technical and
+	 * historical reasons, which can lead to indefinite interruptible
+	 * sleep in n_tty_read().  The following explicitly tells
+	 * n_tty_read() to abort readers.
+	 */
+	set_bit(TTY_HUPPING, &tty->flags);
+
	/* inuse_filps is protected by the single tty lock,
	   this really needs to change if we want to flush the
	   workqueue with the lock held */

@@ -757,6 +765,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
	 * can't yet guarantee all that.
	 */
	set_bit(TTY_HUPPED, &tty->flags);
+	clear_bit(TTY_HUPPING, &tty->flags);
	tty_unlock(tty);
 
	if (f)
@@ -208,8 +208,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
	if (!udev->parent)
		rc = hcd_bus_suspend(udev, msg);
 
-	/* Non-root devices don't need to do anything for FREEZE or PRETHAW */
-	else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
+	/*
+	 * Non-root USB2 devices don't need to do anything for FREEZE
+	 * or PRETHAW. USB3 devices don't support global suspend and
+	 * needs to be selectively suspended.
+	 */
+	else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
+		 && (udev->speed < USB_SPEED_SUPER))
		rc = 0;
	else
		rc = usb_port_suspend(udev, msg);
@@ -167,7 +167,7 @@ static int dwc3_pci_probe(struct pci_dev *pci,
	ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
	if (ret) {
		dev_err(dev, "couldn't add resources to dwc3 device\n");
-		return ret;
+		goto err;
	}
 
	pci_set_drvdata(pci, dwc3);
@@ -114,15 +114,19 @@ static int service_tx_status_request(
	}
 
	is_in = epnum & USB_DIR_IN;
-	if (is_in) {
-		epnum &= 0x0f;
+	epnum &= 0x0f;
+	if (epnum >= MUSB_C_NUM_EPS) {
+		handled = -EINVAL;
+		break;
+	}
+
+	if (is_in)
		ep = &musb->endpoints[epnum].ep_in;
-	} else {
+	else
		ep = &musb->endpoints[epnum].ep_out;
-	}
	regs = musb->endpoints[epnum].regs;
 
-	if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
+	if (!ep->desc) {
		handled = -EINVAL;
		break;
	}
@@ -752,6 +752,62 @@ static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
	return 0;
 }
 
+static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
+				 int count, struct perm_bits *perm,
+				 int offset, __le32 val)
+{
+	__le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
+				  offset + PCI_EXP_DEVCTL);
+	int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
+
+	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+	if (count < 0)
+		return count;
+
+	/*
+	 * The FLR bit is virtualized, if set and the device supports PCIe
+	 * FLR, issue a reset_function.  Regardless, clear the bit, the spec
+	 * requires it to be always read as zero.  NB, reset_function might
+	 * not use a PCIe FLR, we don't have that level of granularity.
+	 */
+	if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
+		u32 cap;
+		int ret;
+
+		*ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
+
+		ret = pci_user_read_config_dword(vdev->pdev,
+						 pos - offset + PCI_EXP_DEVCAP,
+						 &cap);
+
+		if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
+			pci_try_reset_function(vdev->pdev);
+	}
+
+	/*
+	 * MPS is virtualized to the user, writes do not change the physical
+	 * register since determining a proper MPS value requires a system wide
+	 * device view.  The MRRS is largely independent of MPS, but since the
+	 * user does not have that system-wide view, they might set a safe, but
+	 * inefficiently low value.  Here we allow writes through to hardware,
+	 * but we set the floor to the physical device MPS setting, so that
+	 * we can at least use full TLPs, as defined by the MPS value.
+	 *
+	 * NB, if any devices actually depend on an artificially low MRRS
+	 * setting, this will need to be revisited, perhaps with a quirk
+	 * though pcie_set_readrq().
+	 */
+	if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
+		readrq = 128 <<
+			((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
+		readrq = max(readrq, pcie_get_mps(vdev->pdev));
+
+		pcie_set_readrq(vdev->pdev, readrq);
+	}
+
+	return count;
+}
+
 /* Permissions for PCI Express capability */
 static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
 {

@@ -759,26 +815,67 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
	if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
		return -ENOMEM;
 
+	perm->writefn = vfio_exp_config_write;
+
	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
 
	/*
-	 * Allow writes to device control fields (includes FLR!)
-	 * but not to devctl_phantom which could confuse IOMMU
-	 * or to the ARI bit in devctl2 which is set at probe time
+	 * Allow writes to device control fields, except devctl_phantom,
+	 * which could confuse IOMMU, MPS, which can break communication
+	 * with other physical devices, and the ARI bit in devctl2, which
+	 * is set at probe time.  FLR and MRRS get virtualized via our
+	 * writefn.
	 */
-	p_setw(perm, PCI_EXP_DEVCTL, NO_VIRT, ~PCI_EXP_DEVCTL_PHANTOM);
+	p_setw(perm, PCI_EXP_DEVCTL,
+	       PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
+	       PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
	p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
	return 0;
 }
 
+static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
+				int count, struct perm_bits *perm,
+				int offset, __le32 val)
+{
+	u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
+
+	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+	if (count < 0)
+		return count;
+
+	/*
+	 * The FLR bit is virtualized, if set and the device supports AF
+	 * FLR, issue a reset_function.  Regardless, clear the bit, the spec
+	 * requires it to be always read as zero.  NB, reset_function might
+	 * not use an AF FLR, we don't have that level of granularity.
+	 */
+	if (*ctrl & PCI_AF_CTRL_FLR) {
+		u8 cap;
+		int ret;
+
+		*ctrl &= ~PCI_AF_CTRL_FLR;
+
+		ret = pci_user_read_config_byte(vdev->pdev,
+						pos - offset + PCI_AF_CAP,
+						&cap);
+
+		if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
+			pci_try_reset_function(vdev->pdev);
+	}
+
+	return count;
+}
+
 /* Permissions for Advanced Function capability */
 static int __init init_pci_cap_af_perm(struct perm_bits *perm)
 {
	if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
		return -ENOMEM;
 
+	perm->writefn = vfio_af_config_write;
+
	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
-	p_setb(perm, PCI_AF_CTRL, NO_VIRT, PCI_AF_CTRL_FLR);
+	p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
	return 0;
 }
 
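The MRRS field in the PCIe Device Control register encodes the request size as a power of two: 128 bytes shifted left by the 3-bit field in bits 14:12, which is exactly the "128 <<" expression in the hunk above. A self-contained sketch of the floor logic; the stand-in for pcie_get_mps() returns an assumed value purely for illustration:

#include <stdio.h>

#define PCI_EXP_DEVCTL_READRQ 0x7000	/* bits 14:12 of Device Control */

static int device_mps(void)
{
	return 256;	/* assumed physical MPS, stands in for pcie_get_mps() */
}

int main(void)
{
	unsigned short devctl = 0x0000;	/* user asked for MRRS = 128 bytes */
	int readrq = 128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);

	/* floor the user's MRRS at the device MPS so full TLPs stay usable */
	if (readrq < device_mps())
		readrq = device_mps();

	printf("effective MRRS: %d bytes\n", readrq);	/* prints 256 */
	return 0;
}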
@@ -450,7 +450,7 @@ static bool watchdog_is_running(void)
 
	is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
		&& (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
-			& F71808FG_FLAG_WD_EN);
+			& BIT(F71808FG_FLAG_WD_EN));
 
	superio_exit(watchdog.sioaddr);
 
@@ -746,7 +746,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, umode_t m
 
	autofs4_del_active(dentry);
 
-	inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
+	inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
	if (!inode)
		return -ENOMEM;
	d_add(dentry, inode);
@@ -242,8 +242,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
-	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
-	ext4_group_desc_csum_set(sb, block_group, gdp);
	return 0;
 }
 

@@ -447,6 +445,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
+		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		if (err) {
@@ -63,44 +63,6 @@ void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
 }
 
-/* Initializes an uninitialized inode bitmap */
-static int ext4_init_inode_bitmap(struct super_block *sb,
-				  struct buffer_head *bh,
-				  ext4_group_t block_group,
-				  struct ext4_group_desc *gdp)
-{
-	struct ext4_group_info *grp;
-	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	J_ASSERT_BH(bh, buffer_locked(bh));
-
-	/* If checksum is bad mark all blocks and inodes use to prevent
-	 * allocation, essentially implementing a per-group read-only flag. */
-	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
-		grp = ext4_get_group_info(sb, block_group);
-		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
-			percpu_counter_sub(&sbi->s_freeclusters_counter,
-					   grp->bb_free);
-		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
-		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
-			int count;
-			count = ext4_free_inodes_count(sb, gdp);
-			percpu_counter_sub(&sbi->s_freeinodes_counter,
-					   count);
-		}
-		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
-		return -EFSBADCRC;
-	}
-
-	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
-	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
-			     bh->b_data);
-	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
-				   EXT4_INODES_PER_GROUP(sb) / 8);
-	ext4_group_desc_csum_set(sb, block_group, gdp);
-
-	return 0;
-}
-
 void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
 {
	if (uptodate) {

@@ -184,17 +146,14 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
-		err = ext4_init_inode_bitmap(sb, bh, block_group, desc);
+		memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
+		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
+				     sb->s_blocksize * 8, bh->b_data);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
-		if (err) {
-			ext4_error(sb, "Failed to init inode bitmap for group "
-				   "%u: %d", block_group, err);
-			goto out;
-		}
		return bh;
	}
	ext4_unlock_group(sb, block_group);
@@ -377,7 +377,7 @@ out:
 static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
				    unsigned int len)
 {
-	int ret, size;
+	int ret, size, no_expand;
	struct ext4_inode_info *ei = EXT4_I(inode);
 
	if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))

@@ -387,15 +387,14 @@ static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
	if (size < len)
		return -ENOSPC;
 
-	down_write(&EXT4_I(inode)->xattr_sem);
+	ext4_write_lock_xattr(inode, &no_expand);
 
	if (ei->i_inline_off)
		ret = ext4_update_inline_data(handle, inode, len);
	else
		ret = ext4_create_inline_data(handle, inode, len);
 
-	up_write(&EXT4_I(inode)->xattr_sem);
-
+	ext4_write_unlock_xattr(inode, &no_expand);
	return ret;
 }
 

@@ -537,7 +536,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
					      struct inode *inode,
					      unsigned flags)
 {
-	int ret, needed_blocks;
+	int ret, needed_blocks, no_expand;
	handle_t *handle = NULL;
	int retries = 0, sem_held = 0;
	struct page *page = NULL;

@@ -577,7 +576,7 @@ retry:
		goto out;
	}
 
-	down_write(&EXT4_I(inode)->xattr_sem);
+	ext4_write_lock_xattr(inode, &no_expand);
	sem_held = 1;
	/* If some one has already done this for us, just exit. */
	if (!ext4_has_inline_data(inode)) {

@@ -613,7 +612,7 @@ retry:
		page_cache_release(page);
		page = NULL;
		ext4_orphan_add(handle, inode);
-		up_write(&EXT4_I(inode)->xattr_sem);
+		ext4_write_unlock_xattr(inode, &no_expand);
		sem_held = 0;
		ext4_journal_stop(handle);
		handle = NULL;

@@ -639,7 +638,7 @@ out:
		page_cache_release(page);
	}
	if (sem_held)
-		up_write(&EXT4_I(inode)->xattr_sem);
+		ext4_write_unlock_xattr(inode, &no_expand);
	if (handle)
		ext4_journal_stop(handle);
	brelse(iloc.bh);

@@ -732,7 +731,7 @@ convert:
 int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
			       unsigned copied, struct page *page)
 {
-	int ret;
+	int ret, no_expand;
	void *kaddr;
	struct ext4_iloc iloc;
 

@@ -750,7 +749,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
		goto out;
	}
 
-	down_write(&EXT4_I(inode)->xattr_sem);
+	ext4_write_lock_xattr(inode, &no_expand);
	BUG_ON(!ext4_has_inline_data(inode));
 
	kaddr = kmap_atomic(page);

@@ -760,7 +759,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
	/* clear page dirty so that writepages wouldn't work for us. */
	ClearPageDirty(page);
 
-	up_write(&EXT4_I(inode)->xattr_sem);
+	ext4_write_unlock_xattr(inode, &no_expand);
	brelse(iloc.bh);
 out:
	return copied;

@@ -771,7 +770,7 @@ ext4_journalled_write_inline_data(struct inode *inode,
				  unsigned len,
				  struct page *page)
 {
-	int ret;
+	int ret, no_expand;
	void *kaddr;
	struct ext4_iloc iloc;
 

@@ -781,11 +780,11 @@ ext4_journalled_write_inline_data(struct inode *inode,
		return NULL;
	}
 
-	down_write(&EXT4_I(inode)->xattr_sem);
+	ext4_write_lock_xattr(inode, &no_expand);
	kaddr = kmap_atomic(page);
	ext4_write_inline_data(inode, &iloc, kaddr, 0, len);
	kunmap_atomic(kaddr);
-	up_write(&EXT4_I(inode)->xattr_sem);
+	ext4_write_unlock_xattr(inode, &no_expand);
 
	return iloc.bh;
 }

@@ -1269,7 +1268,7 @@ out:
 int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
			      struct dentry *dentry, struct inode *inode)
 {
-	int ret, inline_size;
+	int ret, inline_size, no_expand;
	void *inline_start;
	struct ext4_iloc iloc;
	struct inode *dir = d_inode(dentry->d_parent);

@@ -1278,7 +1277,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
	if (ret)
		return ret;
 
-	down_write(&EXT4_I(dir)->xattr_sem);
+	ext4_write_lock_xattr(dir, &no_expand);
	if (!ext4_has_inline_data(dir))
		goto out;
 

@@ -1324,7 +1323,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
 
 out:
	ext4_mark_inode_dirty(handle, dir);
-	up_write(&EXT4_I(dir)->xattr_sem);
+	ext4_write_unlock_xattr(dir, &no_expand);
	brelse(iloc.bh);
	return ret;
 }

@@ -1684,7 +1683,7 @@ int ext4_delete_inline_entry(handle_t *handle,
			     struct buffer_head *bh,
			     int *has_inline_data)
 {
-	int err, inline_size;
+	int err, inline_size, no_expand;
	struct ext4_iloc iloc;
	void *inline_start;
 

@@ -1692,7 +1691,7 @@ int ext4_delete_inline_entry(handle_t *handle,
	if (err)
		return err;
 
-	down_write(&EXT4_I(dir)->xattr_sem);
+	ext4_write_lock_xattr(dir, &no_expand);
	if (!ext4_has_inline_data(dir)) {
		*has_inline_data = 0;
		goto out;

@@ -1727,7 +1726,7 @@ int ext4_delete_inline_entry(handle_t *handle,
 
	ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size);
 out:
-	up_write(&EXT4_I(dir)->xattr_sem);
+	ext4_write_unlock_xattr(dir, &no_expand);
	brelse(iloc.bh);
	if (err != -ENOENT)
		ext4_std_error(dir->i_sb, err);

@@ -1826,11 +1825,11 @@ out:
 
 int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
 {
-	int ret;
+	int ret, no_expand;
 
-	down_write(&EXT4_I(inode)->xattr_sem);
+	ext4_write_lock_xattr(inode, &no_expand);
	ret = ext4_destroy_inline_data_nolock(handle, inode);
-	up_write(&EXT4_I(inode)->xattr_sem);
+	ext4_write_unlock_xattr(inode, &no_expand);
 
	return ret;
 }

@@ -1915,7 +1914,7 @@ out:
 void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
 {
	handle_t *handle;
-	int inline_size, value_len, needed_blocks;
+	int inline_size, value_len, needed_blocks, no_expand;
	size_t i_size;
	void *value = NULL;
	struct ext4_xattr_ibody_find is = {

@@ -1932,7 +1931,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
	if (IS_ERR(handle))
		return;
 
-	down_write(&EXT4_I(inode)->xattr_sem);
+	ext4_write_lock_xattr(inode, &no_expand);
	if (!ext4_has_inline_data(inode)) {
		*has_inline = 0;
		ext4_journal_stop(handle);

@@ -1990,7 +1989,7 @@ out_error:
	up_write(&EXT4_I(inode)->i_data_sem);
 out:
	brelse(is.iloc.bh);
-	up_write(&EXT4_I(inode)->xattr_sem);
+	ext4_write_unlock_xattr(inode, &no_expand);
	kfree(value);
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

@@ -2006,7 +2005,7 @@ out:
 
 int ext4_convert_inline_data(struct inode *inode)
 {
-	int error, needed_blocks;
+	int error, needed_blocks, no_expand;
	handle_t *handle;
	struct ext4_iloc iloc;
 

@@ -2028,15 +2027,10 @@ int ext4_convert_inline_data(struct inode *inode)
		goto out_free;
	}
 
-	down_write(&EXT4_I(inode)->xattr_sem);
-	if (!ext4_has_inline_data(inode)) {
-		up_write(&EXT4_I(inode)->xattr_sem);
-		goto out;
-	}
-
-	error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
-	up_write(&EXT4_I(inode)->xattr_sem);
-out:
+	ext4_write_lock_xattr(inode, &no_expand);
+	if (ext4_has_inline_data(inode))
+		error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
+	ext4_write_unlock_xattr(inode, &no_expand);
	ext4_journal_stop(handle);
 out_free:
	brelse(iloc.bh);
@ -1528,6 +1528,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
|
|||
BUG_ON(!PageLocked(page));
|
||||
BUG_ON(PageWriteback(page));
|
||||
if (invalidate) {
|
||||
if (page_mapped(page))
|
||||
clear_page_dirty_for_io(page);
|
||||
block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
|
||||
ClearPageUptodate(page);
|
||||
}
|
||||
|
@@ -3280,6 +3282,9 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 	 * case, we allocate an io_end structure to hook to the iocb.
 	 */
 	iocb->private = NULL;
+	if (overwrite) {
+		get_block_func = ext4_get_block_write_nolock;
+	} else {
 	ext4_inode_aio_set(inode, NULL);
 	if (!is_sync_kiocb(iocb)) {
 		io_end = ext4_init_io_end(inode, GFP_NOFS);
@@ -3288,7 +3293,8 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 			goto retake_lock;
 		}
 		/*
-		 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
+		 * Grab reference for DIO. Will be dropped in
+		 * ext4_end_io_dio()
 		 */
 		iocb->private = ext4_get_io_end(io_end);
 		/*
@@ -3299,10 +3305,6 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 		 */
 		ext4_inode_aio_set(inode, io_end);
 	}
-
-	if (overwrite) {
-		get_block_func = ext4_get_block_write_nolock;
-	} else {
 		get_block_func = ext4_get_block_write;
 		dio_flags = DIO_LOCKING;
 	}
@@ -4288,6 +4290,12 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 		goto bad_inode;
 	raw_inode = ext4_raw_inode(&iloc);
 
+	if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
+		EXT4_ERROR_INODE(inode, "root inode unallocated");
+		ret = -EFSCORRUPTED;
+		goto bad_inode;
+	}
+
 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
@@ -2130,6 +2130,8 @@ static int ext4_check_descriptors(struct super_block *sb,
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
 				 "Block bitmap for group %u overlaps "
 				 "superblock", i);
+			if (!(sb->s_flags & MS_RDONLY))
+				return 0;
 		}
 		if (block_bitmap < first_block || block_bitmap > last_block) {
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2142,6 +2144,8 @@ static int ext4_check_descriptors(struct super_block *sb,
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
 				 "Inode bitmap for group %u overlaps "
 				 "superblock", i);
+			if (!(sb->s_flags & MS_RDONLY))
+				return 0;
 		}
 		if (inode_bitmap < first_block || inode_bitmap > last_block) {
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2154,6 +2158,8 @@ static int ext4_check_descriptors(struct super_block *sb,
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
 				 "Inode table for group %u overlaps "
 				 "superblock", i);
+			if (!(sb->s_flags & MS_RDONLY))
+				return 0;
 		}
 		if (inode_table < first_block ||
 		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
@@ -1143,16 +1143,14 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
 	struct ext4_xattr_block_find bs = {
 		.s = { .not_found = -ENODATA, },
 	};
-	unsigned long no_expand;
+	int no_expand;
 	int error;
 
 	if (!name)
 		return -EINVAL;
 	if (strlen(name) > 255)
 		return -ERANGE;
-	down_write(&EXT4_I(inode)->xattr_sem);
-	no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
-	ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
+	ext4_write_lock_xattr(inode, &no_expand);
 
 	error = ext4_reserve_inode_write(handle, inode, &is.iloc);
 	if (error)
@@ -1213,7 +1211,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
 		ext4_xattr_update_super_block(handle, inode->i_sb);
 		inode->i_ctime = ext4_current_time(inode);
 		if (!value)
-			ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
+			no_expand = 0;
 		error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
 		/*
 		 * The bh is consumed by ext4_mark_iloc_dirty, even with
@@ -1227,9 +1225,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
 cleanup:
 	brelse(is.iloc.bh);
 	brelse(bs.bh);
-	if (no_expand == 0)
-		ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
-	up_write(&EXT4_I(inode)->xattr_sem);
+	ext4_write_unlock_xattr(inode, &no_expand);
 	return error;
 }
 
@@ -1313,12 +1309,11 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
 	int error = 0, tried_min_extra_isize = 0;
 	int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
 	int isize_diff;	/* How much do we need to grow i_extra_isize */
+	int no_expand;
+
+	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
+		return 0;
 
-	down_write(&EXT4_I(inode)->xattr_sem);
-	/*
-	 * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
-	 */
-	ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
 retry:
 	isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
@@ -1512,8 +1507,7 @@ retry:
 	}
 	brelse(bh);
 out:
-	ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
-	up_write(&EXT4_I(inode)->xattr_sem);
+	ext4_write_unlock_xattr(inode, &no_expand);
 	return 0;
 
 cleanup:
@@ -1525,10 +1519,10 @@ cleanup:
 	kfree(bs);
 	brelse(bh);
 	/*
-	 * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
-	 * size expansion failed.
+	 * Inode size expansion failed; don't try again
 	 */
-	up_write(&EXT4_I(inode)->xattr_sem);
+	no_expand = 1;
+	ext4_write_unlock_xattr(inode, &no_expand);
 	return error;
 }
 
@@ -101,6 +101,38 @@ extern const struct xattr_handler ext4_xattr_security_handler;
 
 #define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c"
 
+/*
+ * The EXT4_STATE_NO_EXPAND is overloaded and used for two purposes.
+ * The first is to signal that there the inline xattrs and data are
+ * taking up so much space that we might as well not keep trying to
+ * expand it.  The second is that xattr_sem is taken for writing, so
+ * we shouldn't try to recurse into the inode expansion.  For this
+ * second case, we need to make sure that we take save and restore the
+ * NO_EXPAND state flag appropriately.
+ */
+static inline void ext4_write_lock_xattr(struct inode *inode, int *save)
+{
+	down_write(&EXT4_I(inode)->xattr_sem);
+	*save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
+	ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
+}
+
+static inline int ext4_write_trylock_xattr(struct inode *inode, int *save)
+{
+	if (down_write_trylock(&EXT4_I(inode)->xattr_sem) == 0)
+		return 0;
+	*save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
+	ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
+	return 1;
+}
+
+static inline void ext4_write_unlock_xattr(struct inode *inode, int *save)
+{
+	if (*save == 0)
+		ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
+	up_write(&EXT4_I(inode)->xattr_sem);
+}
+
 extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
 
 extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
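The save/restore discipline in these helpers is the heart of the ext4 hunks above: an unlock only clears NO_EXPAND if the flag was clear when the matching lock was taken, so a pre-existing "don't expand" state survives intermediate lock sections. Below is a minimal userspace model of that discipline — a pthread rwlock and a plain int stand in for xattr_sem and the inode state bit, and every name is illustrative rather than the kernel's:

/* Userspace sketch of the ext4_write_lock_xattr() save/restore pattern. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct fake_inode {
	pthread_rwlock_t xattr_sem;
	int no_expand;			/* models EXT4_STATE_NO_EXPAND */
};

static void write_lock_xattr(struct fake_inode *inode, int *save)
{
	pthread_rwlock_wrlock(&inode->xattr_sem);
	*save = inode->no_expand;	/* remember the caller's state */
	inode->no_expand = 1;		/* forbid recursion while locked */
}

static void write_unlock_xattr(struct fake_inode *inode, int *save)
{
	if (*save == 0)			/* only clear what we set */
		inode->no_expand = 0;
	pthread_rwlock_unlock(&inode->xattr_sem);
}

int main(void)
{
	struct fake_inode inode = {
		.xattr_sem = PTHREAD_RWLOCK_INITIALIZER,
		.no_expand = 1,		/* flag was already set */
	};
	int save;

	write_lock_xattr(&inode, &save);
	/* ... critical section ... */
	write_unlock_xattr(&inode, &save);
	assert(inode.no_expand == 1);	/* pre-existing flag preserved */
	printf("no_expand preserved: %d\n", inode.no_expand);
	return 0;
}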
@@ -747,11 +747,12 @@ int inode_congested(struct inode *inode, int cong_bits)
 	 */
 	if (inode && inode_to_wb_is_valid(inode)) {
 		struct bdi_writeback *wb;
-		bool locked, congested;
+		struct wb_lock_cookie lock_cookie = {};
+		bool congested;
 
-		wb = unlocked_inode_to_wb_begin(inode, &locked);
+		wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
 		congested = wb_congested(wb, cong_bits);
-		unlocked_inode_to_wb_end(inode, locked);
+		unlocked_inode_to_wb_end(inode, &lock_cookie);
 		return congested;
 	}
 
@@ -914,7 +914,7 @@ out:
 }
 
 /*
- * This is a variaon of __jbd2_update_log_tail which checks for validity of
+ * This is a variation of __jbd2_update_log_tail which checks for validity of
  * provided log tail and locks j_checkpoint_mutex. So it is safe against races
  * with other threads updating log tail.
  */
@@ -1384,6 +1384,9 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
 	journal_superblock_t *sb = journal->j_superblock;
 	int ret;
 
+	if (is_journal_aborted(journal))
+		return -EIO;
+
 	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
 	jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
 		  tail_block, tail_tid);
@@ -345,7 +345,7 @@ static void jffs2_put_super (struct super_block *sb)
 static void jffs2_kill_sb(struct super_block *sb)
 {
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
-	if (!(sb->s_flags & MS_RDONLY))
+	if (c && !(sb->s_flags & MS_RDONLY))
 		jffs2_stop_garbage_collect_thread(c);
 	kill_mtd_super(sb);
 	kfree(c);
@@ -219,9 +219,10 @@ getname_kernel(const char * filename)
 	if (len <= EMBEDDED_NAME_MAX) {
 		result->name = (char *)result->iname;
 	} else if (len <= PATH_MAX) {
+		const size_t size = offsetof(struct filename, iname[1]);
 		struct filename *tmp;
 
-		tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+		tmp = kmalloc(size, GFP_KERNEL);
 		if (unlikely(!tmp)) {
 			__putname(result);
 			return ERR_PTR(-ENOMEM);
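The point of allocating offsetof(struct filename, iname[1]) rather than sizeof(*tmp) is subtle: with a flexible array member, sizeof() gives a size whose end — and therefore ->iname — can coincide with the start of the very next heap object, so the "name != iname" test for an out-of-line name stops being reliable. Reserving one byte of iname keeps ->iname strictly inside this allocation. A standalone demonstration of the arithmetic, using a simplified stand-in struct (not the kernel's exact definition):

#include <stddef.h>
#include <stdio.h>

struct filename {
	const char *name;
	int refcnt;
	char iname[];		/* flexible array member */
};

int main(void)
{
	printf("sizeof(struct filename)              = %zu\n",
	       sizeof(struct filename));
	printf("offsetof(struct filename, iname)     = %zu\n",
	       offsetof(struct filename, iname));
	printf("offsetof(struct filename, iname[1])  = %zu\n",
	       offsetof(struct filename, iname[1]));
	/* The last value is one byte larger: the allocation now owns
	 * iname[0], so no other object can start at ->iname. */
	return 0;
}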
@@ -1036,7 +1036,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
 			goto out_free;
 	}
 
-	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
+	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
+	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
 	/* Don't allow unprivileged users to change mount flags */
 	if (flag & CL_UNPRIVILEGED) {
 		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
@@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
 				       u32 event_mask,
 				       void *data, int data_type)
 {
-	__u32 marks_mask, marks_ignored_mask;
+	__u32 marks_mask = 0, marks_ignored_mask = 0;
 	struct path *path = data;
 
 	pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
@@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
 	    !d_can_lookup(path->dentry))
 		return false;
 
-	if (inode_mark && vfsmnt_mark) {
-		marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
-		marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
-	} else if (inode_mark) {
-		/*
-		 * if the event is for a child and this inode doesn't care about
-		 * events on the child, don't send it!
-		 */
-		if ((event_mask & FS_EVENT_ON_CHILD) &&
-		    !(inode_mark->mask & FS_EVENT_ON_CHILD))
-			return false;
-		marks_mask = inode_mark->mask;
-		marks_ignored_mask = inode_mark->ignored_mask;
-	} else if (vfsmnt_mark) {
-		marks_mask = vfsmnt_mark->mask;
-		marks_ignored_mask = vfsmnt_mark->ignored_mask;
-	} else {
-		BUG();
+	/*
+	 * if the event is for a child and this inode doesn't care about
+	 * events on the child, don't send it!
+	 */
+	if (inode_mark &&
+	    (!(event_mask & FS_EVENT_ON_CHILD) ||
+	     (inode_mark->mask & FS_EVENT_ON_CHILD))) {
+		marks_mask |= inode_mark->mask;
+		marks_ignored_mask |= inode_mark->ignored_mask;
+	}
+
+	if (vfsmnt_mark) {
+		marks_mask |= vfsmnt_mark->mask;
+		marks_ignored_mask |= vfsmnt_mark->ignored_mask;
 	}
 
 	if (d_is_dir(path->dentry) &&
@@ -2643,7 +2643,7 @@ static int journal_init_dev(struct super_block *super,
 	if (IS_ERR(journal->j_dev_bd)) {
 		result = PTR_ERR(journal->j_dev_bd);
 		journal->j_dev_bd = NULL;
-		reiserfs_warning(super,
+		reiserfs_warning(super, "sh-457",
 				 "journal_init_dev: Cannot open '%s': %i",
 				 jdev_name, result);
 		return result;
@@ -1728,8 +1728,11 @@ static void ubifs_remount_ro(struct ubifs_info *c)
 
 	dbg_save_space_info(c);
 
-	for (i = 0; i < c->jhead_cnt; i++)
-		ubifs_wbuf_sync(&c->jheads[i].wbuf);
+	for (i = 0; i < c->jhead_cnt; i++) {
+		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
+		if (err)
+			ubifs_ro_mode(c, err);
+	}
 
 	c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
 	c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
@@ -1795,8 +1798,11 @@ static void ubifs_put_super(struct super_block *sb)
 	int err;
 
 	/* Synchronize write-buffers */
-	for (i = 0; i < c->jhead_cnt; i++)
-		ubifs_wbuf_sync(&c->jheads[i].wbuf);
+	for (i = 0; i < c->jhead_cnt; i++) {
+		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
+		if (err)
+			ubifs_ro_mode(c, err);
+	}
 
 	/*
 	 * We are being cleanly unmounted which means the
@@ -191,6 +191,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 	set_wb_congested(bdi->wb.congested, sync);
 }
 
+struct wb_lock_cookie {
+	bool locked;
+	unsigned long flags;
+};
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 /**
@@ -366,7 +366,7 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
 /**
  * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
  * @inode: target inode
- * @lockedp: temp bool output param, to be passed to the end function
+ * @cookie: output param, to be passed to the end function
  *
  * The caller wants to access the wb associated with @inode but isn't
  * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
@@ -374,12 +374,12 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
  * association doesn't change until the transaction is finished with
  * unlocked_inode_to_wb_end().
  *
- * The caller must call unlocked_inode_to_wb_end() with *@lockdep
- * afterwards and can't sleep during transaction.  IRQ may or may not be
- * disabled on return.
+ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
+ * can't sleep during the transaction.  IRQs may or may not be disabled on
+ * return.
  */
 static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
 {
 	rcu_read_lock();
 
@@ -387,10 +387,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
 	 * Paired with store_release in inode_switch_wb_work_fn() and
 	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
 	 */
-	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
 
-	if (unlikely(*lockedp))
-		spin_lock_irq(&inode->i_mapping->tree_lock);
+	if (unlikely(cookie->locked))
+		spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);
 
 	/*
 	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
@@ -402,12 +402,14 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
 /**
  * unlocked_inode_to_wb_end - end inode wb access transaction
  * @inode: target inode
- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ * @cookie: @cookie from unlocked_inode_to_wb_begin()
  */
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+					    struct wb_lock_cookie *cookie)
 {
-	if (unlikely(locked))
-		spin_unlock_irq(&inode->i_mapping->tree_lock);
+	if (unlikely(cookie->locked))
+		spin_unlock_irqrestore(&inode->i_mapping->tree_lock,
+				       cookie->flags);
 
 	rcu_read_unlock();
 }
@@ -454,12 +456,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
 }
 
 static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
 {
 	return inode_to_wb(inode);
 }
 
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+					    struct wb_lock_cookie *cookie)
 {
 }
 
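The reason a bare bool no longer suffices here: the begin side now takes the tree_lock with spin_lock_irqsave(), so the end side needs the saved flags as well as the locked bit, and both travel in the cookie. A toy userspace model of that save/restore flow — a plain variable stands in for the CPU interrupt state, and none of these names are the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct wb_lock_cookie {
	bool locked;
	unsigned long flags;		/* saved "irq" state */
};

static unsigned long fake_irq_state = 0xabc;	/* stands in for CPU flags */

static void wb_begin(struct wb_lock_cookie *cookie, bool need_lock)
{
	cookie->locked = need_lock;
	if (cookie->locked) {
		cookie->flags = fake_irq_state;	/* like spin_lock_irqsave() */
		fake_irq_state = 0;		/* "irqs disabled" */
	}
}

static void wb_end(struct wb_lock_cookie *cookie)
{
	if (cookie->locked)
		fake_irq_state = cookie->flags;	/* like spin_unlock_irqrestore() */
}

int main(void)
{
	struct wb_lock_cookie cookie = {};

	wb_begin(&cookie, true);
	wb_end(&cookie);
	printf("irq state restored: %#lx\n", fake_irq_state);
	return 0;
}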
@@ -793,7 +793,7 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
 extern void hidinput_disconnect(struct hid_device *);
 
 int hid_set_field(struct hid_field *, unsigned, __s32);
-int hid_input_report(struct hid_device *, int type, u8 *, int, int);
+int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
 int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
 struct hid_field *hidinput_get_led_field(struct hid_device *hid);
 unsigned int hidinput_count_leds(struct hid_device *hid);
@@ -1098,13 +1098,13 @@ static inline void hid_hw_wait(struct hid_device *hdev)
 *
 * @report: the report we want to know the length
 */
-static inline int hid_report_len(struct hid_report *report)
+static inline u32 hid_report_len(struct hid_report *report)
 {
 	/* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
 	return ((report->size - 1) >> 3) + 1 + (report->id > 0);
 }
 
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
 		int interrupt);
 
 /* HID quirks API */
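The rounding in hid_report_len() is easy to sanity-check in isolation: the expression converts a bit count into bytes and adds one byte when a report ID is present. A quick standalone check mirroring the helper (illustrative values only):

#include <stdio.h>

static unsigned int report_len(unsigned int size_bits, unsigned int id)
{
	/* same arithmetic as hid_report_len():
	 * DIV_ROUND_UP(size_bits, 8) + one byte for a nonzero report ID */
	return ((size_bits - 1) >> 3) + 1 + (id > 0);
}

int main(void)
{
	/* 8 bits -> 1 byte, 9 bits -> 2 bytes, 16 bits + ID -> 3 bytes */
	printf("%u %u %u\n",
	       report_len(8, 0), report_len(9, 0), report_len(16, 1));
	return 0;
}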
@@ -240,10 +240,14 @@ extern pgprot_t protection_map[16];
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
+ * MM layer fills up gfp_mask for page allocations but fault handler might
+ * alter it if its implementation requires a different allocation context.
+ *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
+	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */
 
@@ -342,6 +342,7 @@ struct tty_file_private {
 #define TTY_PTY_LOCK 		16	/* pty private */
 #define TTY_NO_WRITE_SPLIT 	17	/* Preserve write boundaries to driver */
 #define TTY_HUPPED 		18	/* Post driver->hangup() */
+#define TTY_HUPPING		19	/* Hangup in progress */
 #define TTY_LDISC_HALTED	22	/* Line discipline is halted */
 
 #define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
@@ -127,6 +127,7 @@ typedef __u32 int32;
 */
struct cstate {
	byte_t	cs_this;	/* connection id number (xmit) */
+	bool	initialized;	/* true if initialized */
	struct cstate *next;	/* next in ring (xmit) */
	struct iphdr cs_ip;	/* ip/tcp hdr from most recent packet */
	struct tcphdr cs_tcp;
@@ -57,6 +57,7 @@ struct snd_pcm_oss_runtime {
	char *buffer;				/* vmallocated period */
	size_t buffer_used;			/* used length from period buffer */
	struct mutex params_lock;
+	atomic_t rw_ref;		/* concurrent read/write accesses */
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
	struct snd_pcm_plugin *plugin_first;
	struct snd_pcm_plugin *plugin_last;
ipc/shm.c
@@ -198,6 +198,12 @@ static int __shm_open(struct vm_area_struct *vma)
 	if (IS_ERR(shp))
 		return PTR_ERR(shp);
 
+	if (shp->shm_file != sfd->file) {
+		/* ID was reused */
+		shm_unlock(shp);
+		return -EINVAL;
+	}
+
 	shp->shm_atim = get_seconds();
 	shp->shm_lprid = task_tgid_vnr(current);
 	shp->shm_nattch++;
@@ -414,8 +420,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
 	int ret;
 
 	/*
-	 * In case of remap_file_pages() emulation, the file can represent
-	 * removed IPC ID: propogate shm_lock() error to caller.
+	 * In case of remap_file_pages() emulation, the file can represent an
+	 * IPC ID that was removed, and possibly even reused by another shm
+	 * segment already. Propagate this case as an error to caller.
 	 */
 	ret = __shm_open(vma);
 	if (ret)
@@ -439,6 +446,7 @@ static int shm_release(struct inode *ino, struct file *file)
 	struct shm_file_data *sfd = shm_file_data(file);
 
 	put_ipc_ns(sfd->ns);
+	fput(sfd->file);
 	shm_file_data(file) = NULL;
 	kfree(sfd);
 	return 0;
@@ -1198,7 +1206,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
 	file->f_mapping = shp->shm_file->f_mapping;
 	sfd->id = shp->shm_perm.id;
 	sfd->ns = get_ipc_ns(ns);
-	sfd->file = shp->shm_file;
+	/*
+	 * We need to take a reference to the real shm file to prevent the
+	 * pointer from becoming stale in cases where the lifetime of the outer
+	 * file extends beyond that of the shm segment.  It's not usually
+	 * possible, but it can happen during remap_file_pages() emulation as
+	 * that unmaps the memory, then does ->mmap() via file reference only.
+	 * We'll deny the ->mmap() if the shm segment was since removed, but to
+	 * detect shm ID reuse we need to compare the file pointers.
+	 */
+	sfd->file = get_file(shp->shm_file);
 	sfd->vm_ops = NULL;
 
 	err = security_mmap_file(file, prot, flags);
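The whole fix hinges on one comparison: the mapping pins the segment's file pointer at attach time, and a later implicit open must see the same pointer, otherwise the shm ID was recycled by a new segment. A toy standalone model of just that check, with simplified stand-in structs (not the kernel's definitions):

#include <stdio.h>

struct shmid_kernel { void *shm_file; };
struct shm_file_data { void *file; };

static int shm_open_check(struct shmid_kernel *shp, struct shm_file_data *sfd)
{
	if (shp->shm_file != sfd->file)
		return -22;	/* -EINVAL: ID was reused */
	return 0;
}

int main(void)
{
	int old_file, new_file;	/* addresses stand in for struct file * */
	struct shm_file_data sfd = { .file = &old_file };
	struct shmid_kernel same = { .shm_file = &old_file };
	struct shmid_kernel reused = { .shm_file = &new_file };

	printf("same segment: %d\n", shm_open_check(&same, &sfd));
	printf("recycled ID:  %d\n", shm_open_check(&reused, &sfd));
	return 0;
}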
@@ -611,7 +611,8 @@ static int __find_resource(struct resource *root, struct resource *old,
 			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
 					size, constraint->align);
 			alloc.end = alloc.start + size - 1;
-			if (resource_contains(&avail, &alloc)) {
+			if (alloc.start <= alloc.end &&
+			    resource_contains(&avail, &alloc)) {
 				new->start = alloc.start;
 				new->end = alloc.end;
 				return 0;
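The added alloc.start <= alloc.end guard catches unsigned wraparound: with a start near the top of the address space, alloc.start + size - 1 can wrap past zero and come out below the start, which resource_contains() alone would not reject. A standalone demonstration of the same arithmetic (made-up values, uint64_t for portability):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = UINT64_C(0xfffffffffffff000);
	uint64_t size  = 0x2000;
	uint64_t end   = start + size - 1;	/* wraps around zero */

	printf("start=%#" PRIx64 " end=%#" PRIx64 " sane=%d\n",
	       start, end, start <= end);	/* sane=0: reject it */
	return 0;
}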
mm/filemap.c
@@ -571,7 +571,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 	VM_BUG_ON_PAGE(!PageLocked(new), new);
 	VM_BUG_ON_PAGE(new->mapping, new);
 
-	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+	error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
 	if (!error) {
 		struct address_space *mapping = old->mapping;
 		void (*freepage)(struct page *);
@@ -630,7 +630,7 @@ static int __add_to_page_cache_locked(struct page *page,
 		return error;
 	}
 
-	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
+	error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
 	if (error) {
 		if (!huge)
 			mem_cgroup_cancel_charge(page, memcg);
@@ -1192,8 +1192,7 @@ no_page:
 		if (fgp_flags & FGP_ACCESSED)
 			__SetPageReferenced(page);
 
-		err = add_to_page_cache_lru(page, mapping, offset,
-				gfp_mask & GFP_RECLAIM_MASK);
+		err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
 		if (unlikely(err)) {
 			page_cache_release(page);
 			page = NULL;
@@ -1827,19 +1826,18 @@ EXPORT_SYMBOL(generic_file_read_iter);
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
-static int page_cache_read(struct file *file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
-		page = page_cache_alloc_cold(mapping);
+		page = __page_cache_alloc(gfp_mask|__GFP_COLD);
		if (!page)
			return -ENOMEM;

-		ret = add_to_page_cache_lru(page, mapping, offset,
-				mapping_gfp_constraint(mapping, GFP_KERNEL));
+		ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
@@ -2020,7 +2018,7 @@ no_cached_page:
		 * We're only likely to ever get here if MADV_RANDOM is in
		 * effect.
		 */
-		error = page_cache_read(file, offset);
+		error = page_cache_read(file, offset, vmf->gfp_mask);

		/*
		 * The page we want has now been added to the page cache.
mm/memory.c
@@ -1990,6 +1990,20 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
	copy_user_highpage(dst, src, va, vma);
}

+static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
+{
+	struct file *vm_file = vma->vm_file;
+
+	if (vm_file)
+		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
+
+	/*
+	 * Special mappings (e.g. VDSO) do not have any file so fake
+	 * a default GFP_KERNEL for them.
+	 */
+	return GFP_KERNEL;
+}
+
/*
 * Notify the address space that the page is about to become writable so that
 * it can prohibit this or wait for the page to get into an appropriate state.
@@ -2005,6 +2019,7 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
	vmf.pgoff = page->index;
	vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+	vmf.gfp_mask = __get_fault_gfp_mask(vma);
	vmf.page = page;
	vmf.cow_page = NULL;

@@ -2770,6 +2785,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
	vmf.pgoff = pgoff;
	vmf.flags = flags;
	vmf.page = NULL;
+	vmf.gfp_mask = __get_fault_gfp_mask(vma);
	vmf.cow_page = cow_page;

	ret = vma->vm_ops->fault(vma, &vmf);
@@ -2936,6 +2952,7 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
	vmf.pgoff = pgoff;
	vmf.max_pgoff = max_pgoff;
	vmf.flags = flags;
+	vmf.gfp_mask = __get_fault_gfp_mask(vma);
	vma->vm_ops->map_pages(vma, &vmf);
}

@@ -2510,13 +2510,13 @@ void account_page_redirty(struct page *page)
	if (mapping && mapping_cap_account_dirty(mapping)) {
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
-		bool locked;
+		struct wb_lock_cookie cookie = {};

-		wb = unlocked_inode_to_wb_begin(inode, &locked);
+		wb = unlocked_inode_to_wb_begin(inode, &cookie);
		current->nr_dirtied--;
		dec_zone_page_state(page, NR_DIRTIED);
		dec_wb_stat(wb, WB_DIRTIED);
-		unlocked_inode_to_wb_end(inode, locked);
+		unlocked_inode_to_wb_end(inode, &cookie);
	}
}
EXPORT_SYMBOL(account_page_redirty);
@@ -2622,15 +2622,15 @@ void cancel_dirty_page(struct page *page)
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
		struct mem_cgroup *memcg;
-		bool locked;
+		struct wb_lock_cookie cookie = {};

		memcg = mem_cgroup_begin_page_stat(page);
-		wb = unlocked_inode_to_wb_begin(inode, &locked);
+		wb = unlocked_inode_to_wb_begin(inode, &cookie);

		if (TestClearPageDirty(page))
			account_page_cleaned(page, mapping, memcg, wb);

-		unlocked_inode_to_wb_end(inode, locked);
+		unlocked_inode_to_wb_end(inode, &cookie);
		mem_cgroup_end_page_stat(memcg);
	} else {
		ClearPageDirty(page);
@@ -2663,7 +2663,7 @@ int clear_page_dirty_for_io(struct page *page)
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
		struct mem_cgroup *memcg;
-		bool locked;
+		struct wb_lock_cookie cookie = {};

		/*
		 * Yes, Virginia, this is indeed insane.
@@ -2701,14 +2701,14 @@ int clear_page_dirty_for_io(struct page *page)
		 * exclusion.
		 */
		memcg = mem_cgroup_begin_page_stat(page);
-		wb = unlocked_inode_to_wb_begin(inode, &locked);
+		wb = unlocked_inode_to_wb_begin(inode, &cookie);
		if (TestClearPageDirty(page)) {
			mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_wb_stat(wb, WB_RECLAIMABLE);
			ret = 1;
		}
-		unlocked_inode_to_wb_end(inode, locked);
+		unlocked_inode_to_wb_end(inode, &cookie);
		mem_cgroup_end_page_stat(memcg);
		return ret;
	}
@@ -3942,7 +3942,8 @@ next:
	next_reap_node();
out:
	/* Set up the next iteration */
-	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
+	schedule_delayed_work_on(smp_processor_id(), work,
+				 round_jiffies_relative(REAPTIMEOUT_AC));
}

#ifdef CONFIG_SLABINFO
@@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry)
	struct dentry *clnt_dir = pipe_dentry->d_parent;
	struct dentry *gssd_dir = clnt_dir->d_parent;

+	dget(pipe_dentry);
	__rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
	__rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
	__rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
@@ -833,8 +833,25 @@ static int choose_rate(struct snd_pcm_substream *substream,
	return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
}

-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
-				     bool trylock)
+/* parameter locking: returns immediately if tried during streaming */
+static int lock_params(struct snd_pcm_runtime *runtime)
+{
+	if (mutex_lock_interruptible(&runtime->oss.params_lock))
+		return -ERESTARTSYS;
+	if (atomic_read(&runtime->oss.rw_ref)) {
+		mutex_unlock(&runtime->oss.params_lock);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static void unlock_params(struct snd_pcm_runtime *runtime)
+{
+	mutex_unlock(&runtime->oss.params_lock);
+}
+
+/* call with params_lock held */
+static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_hw_params *params, *sparams;
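lock_params() works together with the rw_ref counter added to snd_pcm_oss_runtime above: parameter changes bail out with EBUSY while any read/write path holds a reference. A rough userspace model of that interaction, with C11 atomics in place of atomic_t and the params_lock elided (names and values are illustrative only):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int rw_ref;	/* models runtime->oss.rw_ref */

static int lock_params(void)
{
	/* the real helper also takes params_lock; elided here */
	if (atomic_load(&rw_ref) > 0)
		return -16;	/* -EBUSY: a read/write is in flight */
	return 0;
}

int main(void)
{
	printf("idle: %d\n", lock_params());
	atomic_fetch_add(&rw_ref, 1);	/* a reader/writer enters */
	printf("busy: %d\n", lock_params());
	atomic_fetch_sub(&rw_ref, 1);
	return 0;
}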
@@ -848,12 +865,9 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
	struct snd_mask sformat_mask;
	struct snd_mask mask;

-	if (trylock) {
-		if (!(mutex_trylock(&runtime->oss.params_lock)))
-			return -EAGAIN;
-	} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
-		return -EINTR;
-	sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
+	if (!runtime->oss.params)
+		return 0;
+	sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
	params = kmalloc(sizeof(*params), GFP_KERNEL);
	sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
	if (!sw_params || !params || !sparams) {
@@ -991,7 +1005,6 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
		goto failure;
	}

-	memset(sw_params, 0, sizeof(*sw_params));
	if (runtime->oss.trigger) {
		sw_params->start_threshold = 1;
	} else {
@@ -1079,6 +1092,23 @@ failure:
	kfree(sw_params);
	kfree(params);
	kfree(sparams);
	return err;
}

+/* this one takes the lock by itself */
+static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+				     bool trylock)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int err;
+
+	if (trylock) {
+		if (!(mutex_trylock(&runtime->oss.params_lock)))
+			return -EAGAIN;
+	} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
+		return -ERESTARTSYS;
+
+	err = snd_pcm_oss_change_params_locked(substream);
+	mutex_unlock(&runtime->oss.params_lock);
+	return err;
+}
+
@@ -1107,6 +1137,10 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
	return 0;
}

+/* call with params_lock held */
+/* NOTE: this always call PREPARE unconditionally no matter whether
+ * runtime->oss.prepare is set or not
+ */
static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream)
{
	int err;
@@ -1131,14 +1165,35 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
	struct snd_pcm_runtime *runtime;
	int err;

	if (substream == NULL)
		return 0;
	runtime = substream->runtime;
	if (runtime->oss.params) {
		err = snd_pcm_oss_change_params(substream, false);
		if (err < 0)
			return err;
	}
	if (runtime->oss.prepare) {
+		if (mutex_lock_interruptible(&runtime->oss.params_lock))
+			return -ERESTARTSYS;
		err = snd_pcm_oss_prepare(substream);
+		mutex_unlock(&runtime->oss.params_lock);
		if (err < 0)
			return err;
	}
	return 0;
}

+/* call with params_lock held */
+static int snd_pcm_oss_make_ready_locked(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime;
+	int err;
+
+	runtime = substream->runtime;
+	if (runtime->oss.params) {
+		err = snd_pcm_oss_change_params_locked(substream);
+		if (err < 0)
+			return err;
+	}
+	if (runtime->oss.prepare) {
+		err = snd_pcm_oss_prepare(substream);
+		if (err < 0)
@@ -1367,13 +1422,15 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
	if (atomic_read(&substream->mmap_count))
		return -ENXIO;

-	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
-		return tmp;
+	atomic_inc(&runtime->oss.rw_ref);
	while (bytes > 0) {
		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
			tmp = -ERESTARTSYS;
			break;
		}
+		tmp = snd_pcm_oss_make_ready_locked(substream);
+		if (tmp < 0)
+			goto err;
		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
			tmp = bytes;
			if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
@@ -1429,6 +1486,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
		}
		tmp = 0;
	}
+	atomic_dec(&runtime->oss.rw_ref);
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
}

@@ -1474,13 +1532,15 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
	if (atomic_read(&substream->mmap_count))
		return -ENXIO;

-	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
-		return tmp;
+	atomic_inc(&runtime->oss.rw_ref);
	while (bytes > 0) {
		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
			tmp = -ERESTARTSYS;
			break;
		}
+		tmp = snd_pcm_oss_make_ready_locked(substream);
+		if (tmp < 0)
+			goto err;
		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
			if (runtime->oss.buffer_used == 0) {
				tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
@@ -1521,6 +1581,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
		}
		tmp = 0;
	}
+	atomic_dec(&runtime->oss.rw_ref);
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
}

@@ -1536,10 +1597,12 @@ static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
			continue;
		runtime = substream->runtime;
		snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+		mutex_lock(&runtime->oss.params_lock);
		runtime->oss.prepare = 1;
		runtime->oss.buffer_used = 0;
		runtime->oss.prev_hw_ptr_period = 0;
		runtime->oss.period_ptr = 0;
+		mutex_unlock(&runtime->oss.params_lock);
	}
	return 0;
}
@@ -1625,9 +1688,13 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
			goto __direct;
		if ((err = snd_pcm_oss_make_ready(substream)) < 0)
			return err;
+		atomic_inc(&runtime->oss.rw_ref);
+		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+			atomic_dec(&runtime->oss.rw_ref);
+			return -ERESTARTSYS;
+		}
		format = snd_pcm_oss_format_from(runtime->oss.format);
		width = snd_pcm_format_physical_width(format);
-		mutex_lock(&runtime->oss.params_lock);
		if (runtime->oss.buffer_used > 0) {
#ifdef OSS_DEBUG
			pcm_dbg(substream->pcm, "sync: buffer_used\n");
@@ -1637,10 +1704,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
				   runtime->oss.buffer + runtime->oss.buffer_used,
				   size);
			err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes);
-			if (err < 0) {
-				mutex_unlock(&runtime->oss.params_lock);
-				return err;
-			}
+			if (err < 0)
+				goto unlock;
		} else if (runtime->oss.period_ptr > 0) {
#ifdef OSS_DEBUG
			pcm_dbg(substream->pcm, "sync: period_ptr\n");
@@ -1650,10 +1715,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
					   runtime->oss.buffer,
					   size * 8 / width);
			err = snd_pcm_oss_sync1(substream, size);
-			if (err < 0) {
-				mutex_unlock(&runtime->oss.params_lock);
-				return err;
-			}
+			if (err < 0)
+				goto unlock;
		}
		/*
		 * The ALSA's period might be a bit large than OSS one.
@@ -1684,7 +1747,11 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
				snd_pcm_lib_writev(substream, buffers, size);
			}
		}
+unlock:
		mutex_unlock(&runtime->oss.params_lock);
+		atomic_dec(&runtime->oss.rw_ref);
+		if (err < 0)
+			return err;
		/*
		 * finish sync: drain the buffer
		 */
@@ -1695,7 +1762,9 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
		substream->f_flags = saved_f_flags;
		if (err < 0)
			return err;
+		mutex_lock(&runtime->oss.params_lock);
		runtime->oss.prepare = 1;
+		mutex_unlock(&runtime->oss.params_lock);
	}

	substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
@@ -1706,8 +1775,10 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
		if (err < 0)
			return err;
+		mutex_lock(&runtime->oss.params_lock);
		runtime->oss.buffer_used = 0;
		runtime->oss.prepare = 1;
+		mutex_unlock(&runtime->oss.params_lock);
	}
	return 0;
}
@@ -1719,6 +1790,8 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
	for (idx = 1; idx >= 0; --idx) {
		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
		struct snd_pcm_runtime *runtime;
+		int err;
+
		if (substream == NULL)
			continue;
		runtime = substream->runtime;
@@ -1726,10 +1799,14 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
			rate = 1000;
		else if (rate > 192000)
			rate = 192000;
+		err = lock_params(runtime);
+		if (err < 0)
+			return err;
		if (runtime->oss.rate != rate) {
			runtime->oss.params = 1;
			runtime->oss.rate = rate;
		}
+		unlock_params(runtime);
	}
	return snd_pcm_oss_get_rate(pcm_oss_file);
}
@@ -1754,13 +1831,19 @@ static int snd_pcm_oss_set_channels(struct snd_pcm_oss_file *pcm_oss_file, unsig
	for (idx = 1; idx >= 0; --idx) {
		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
		struct snd_pcm_runtime *runtime;
+		int err;

		if (substream == NULL)
			continue;
		runtime = substream->runtime;
+		err = lock_params(runtime);
+		if (err < 0)
+			return err;
		if (runtime->oss.channels != channels) {
			runtime->oss.params = 1;
			runtime->oss.channels = channels;
		}
+		unlock_params(runtime);
	}
	return snd_pcm_oss_get_channels(pcm_oss_file);
}
@@ -1833,6 +1916,7 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format)
{
	int formats, idx;
+	int err;

	if (format != AFMT_QUERY) {
		formats = snd_pcm_oss_get_formats(pcm_oss_file);
@@ -1846,10 +1930,14 @@ static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int for
			if (substream == NULL)
				continue;
			runtime = substream->runtime;
+			err = lock_params(runtime);
+			if (err < 0)
+				return err;
			if (runtime->oss.format != format) {
				runtime->oss.params = 1;
				runtime->oss.format = format;
			}
+			unlock_params(runtime);
		}
	}
	return snd_pcm_oss_get_format(pcm_oss_file);
@@ -1869,8 +1957,6 @@ static int snd_pcm_oss_set_subdivide1(struct snd_pcm_substream *substream, int s
{
	struct snd_pcm_runtime *runtime;

-	if (substream == NULL)
-		return 0;
	runtime = substream->runtime;
	if (subdivide == 0) {
		subdivide = runtime->oss.subdivision;
@@ -1894,9 +1980,17 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int

	for (idx = 1; idx >= 0; --idx) {
		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+		struct snd_pcm_runtime *runtime;

		if (substream == NULL)
			continue;
-		if ((err = snd_pcm_oss_set_subdivide1(substream, subdivide)) < 0)
+		runtime = substream->runtime;
+		err = lock_params(runtime);
+		if (err < 0)
+			return err;
+		err = snd_pcm_oss_set_subdivide1(substream, subdivide);
+		unlock_params(runtime);
+		if (err < 0)
			return err;
	}
	return err;
@@ -1906,8 +2000,6 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
{
	struct snd_pcm_runtime *runtime;

-	if (substream == NULL)
-		return 0;
	runtime = substream->runtime;
	if (runtime->oss.subdivision || runtime->oss.fragshift)
		return -EINVAL;
@@ -1927,9 +2019,17 @@ static int snd_pcm_oss_set_fragment(struct snd_pcm_oss_file *pcm_oss_file, unsig

	for (idx = 1; idx >= 0; --idx) {
		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+		struct snd_pcm_runtime *runtime;

		if (substream == NULL)
			continue;
-		if ((err = snd_pcm_oss_set_fragment1(substream, val)) < 0)
+		runtime = substream->runtime;
+		err = lock_params(runtime);
+		if (err < 0)
+			return err;
+		err = snd_pcm_oss_set_fragment1(substream, val);
+		unlock_params(runtime);
+		if (err < 0)
			return err;
	}
	return err;
@@ -2013,6 +2113,9 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
	}
	if (psubstream) {
		runtime = psubstream->runtime;
+		cmd = 0;
+		if (mutex_lock_interruptible(&runtime->oss.params_lock))
+			return -ERESTARTSYS;
		if (trigger & PCM_ENABLE_OUTPUT) {
			if (runtime->oss.trigger)
				goto _skip1;
@@ -2030,13 +2133,19 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
			cmd = SNDRV_PCM_IOCTL_DROP;
			runtime->oss.prepare = 1;
		}
-		err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
-		if (err < 0)
-			return err;
-	}
- _skip1:
+_skip1:
+		mutex_unlock(&runtime->oss.params_lock);
+		if (cmd) {
+			err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
+			if (err < 0)
+				return err;
+		}
+	}
	if (csubstream) {
		runtime = csubstream->runtime;
+		cmd = 0;
+		if (mutex_lock_interruptible(&runtime->oss.params_lock))
+			return -ERESTARTSYS;
		if (trigger & PCM_ENABLE_INPUT) {
			if (runtime->oss.trigger)
				goto _skip2;
@@ -2051,11 +2160,14 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
			cmd = SNDRV_PCM_IOCTL_DROP;
			runtime->oss.prepare = 1;
		}
-		err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
-		if (err < 0)
-			return err;
-	}
- _skip2:
+_skip2:
+		mutex_unlock(&runtime->oss.params_lock);
+		if (cmd) {
+			err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
+			if (err < 0)
+				return err;
+		}
+	}
	return 0;
}

@@ -2307,6 +2419,7 @@ static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream,
	runtime->oss.maxfrags = 0;
	runtime->oss.subdivision = 0;
	substream->pcm_release = snd_pcm_oss_release_substream;
+	atomic_set(&runtime->oss.rw_ref, 0);
}

static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file)
@@ -28,6 +28,7 @@
#include <sound/core.h>
#include <sound/minors.h>
#include <sound/pcm.h>
+#include <sound/timer.h>
#include <sound/control.h>
#include <sound/info.h>

@@ -1025,8 +1026,13 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
		snd_free_pages((void*)runtime->control,
			       PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)));
	kfree(runtime->hw_constraints.rules);
-	kfree(runtime);
+	/* Avoid concurrent access to runtime via PCM timer interface */
+	if (substream->timer)
+		spin_lock_irq(&substream->timer->lock);
	substream->runtime = NULL;
+	if (substream->timer)
+		spin_unlock_irq(&substream->timer->lock);
+	kfree(runtime);
	put_pid(substream->pid);
	substream->pid = NULL;
	substream->pstr->substream_opened--;
@@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
	struct snd_rawmidi_params params;
	unsigned int val;

-	if (rfile->output == NULL)
-		return -EINVAL;
	if (get_user(params.stream, &src->stream) ||
	    get_user(params.buffer_size, &src->buffer_size) ||
	    get_user(params.avail_min, &src->avail_min) ||
@@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
	params.no_active_sensing = val;
	switch (params.stream) {
	case SNDRV_RAWMIDI_STREAM_OUTPUT:
+		if (!rfile->output)
+			return -EINVAL;
		return snd_rawmidi_output_params(rfile->output, &params);
	case SNDRV_RAWMIDI_STREAM_INPUT:
+		if (!rfile->input)
+			return -EINVAL;
		return snd_rawmidi_input_params(rfile->input, &params);
	}
	return -EINVAL;
@@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
	int err;
	struct snd_rawmidi_status status;

-	if (rfile->output == NULL)
-		return -EINVAL;
	if (get_user(status.stream, &src->stream))
		return -EFAULT;

	switch (status.stream) {
	case SNDRV_RAWMIDI_STREAM_OUTPUT:
+		if (!rfile->output)
+			return -EINVAL;
		err = snd_rawmidi_output_status(rfile->output, &status);
		break;
	case SNDRV_RAWMIDI_STREAM_INPUT:
+		if (!rfile->input)
+			return -EINVAL;
		err = snd_rawmidi_input_status(rfile->input, &status);
		break;
	default:
@@ -113,16 +117,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
	int err;
	struct snd_rawmidi_status status;

-	if (rfile->output == NULL)
-		return -EINVAL;
	if (get_user(status.stream, &src->stream))
		return -EFAULT;

	switch (status.stream) {
	case SNDRV_RAWMIDI_STREAM_OUTPUT:
+		if (!rfile->output)
+			return -EINVAL;
		err = snd_rawmidi_output_status(rfile->output, &status);
		break;
	case SNDRV_RAWMIDI_STREAM_INPUT:
+		if (!rfile->input)
+			return -EINVAL;
		err = snd_rawmidi_input_status(rfile->input, &status);
		break;
	default:
@@ -1549,7 +1549,8 @@ static void azx_check_snoop_available(struct azx *chip)
		 */
		u8 val;
		pci_read_config_byte(chip->pci, 0x42, &val);
-		if (!(val & 0x80) && chip->pci->revision == 0x30)
+		if (!(val & 0x80) && (chip->pci->revision == 0x30 ||
+				      chip->pci->revision == 0x20))
			snoop = false;
	}

@@ -54,10 +54,17 @@ struct ssm2602_priv {
 * using 2 wire for device control, so we cache them instead.
 * There is no point in caching the reset register
 */
-static const u16 ssm2602_reg[SSM2602_CACHEREGNUM] = {
-	0x0097, 0x0097, 0x0079, 0x0079,
-	0x000a, 0x0008, 0x009f, 0x000a,
-	0x0000, 0x0000
+static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
+	{ .reg = 0x00, .def = 0x0097 },
+	{ .reg = 0x01, .def = 0x0097 },
+	{ .reg = 0x02, .def = 0x0079 },
+	{ .reg = 0x03, .def = 0x0079 },
+	{ .reg = 0x04, .def = 0x000a },
+	{ .reg = 0x05, .def = 0x0008 },
+	{ .reg = 0x06, .def = 0x009f },
+	{ .reg = 0x07, .def = 0x000a },
+	{ .reg = 0x08, .def = 0x0000 },
+	{ .reg = 0x09, .def = 0x0000 }
};


@@ -618,8 +625,8 @@ const struct regmap_config ssm2602_regmap_config = {
	.volatile_reg = ssm2602_register_volatile,

	.cache_type = REGCACHE_RBTREE,
-	.reg_defaults_raw = ssm2602_reg,
-	.num_reg_defaults_raw = ARRAY_SIZE(ssm2602_reg),
+	.reg_defaults = ssm2602_reg,
+	.num_reg_defaults = ARRAY_SIZE(ssm2602_reg),
};
EXPORT_SYMBOL_GPL(ssm2602_regmap_config);

@@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
	}

	usb_fill_int_urb(urb, line6->usbdev,
-			 usb_sndbulkpipe(line6->usbdev,
+			 usb_sndintpipe(line6->usbdev,
					 line6->properties->ep_ctrl_w),
			 transfer_buffer, length, midi_sent, line6,
			 line6->interval);
@@ -182,8 +182,6 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
	unsigned char buf2[BUFSZ];
	size_t ret_len;
	u64 objdump_addr;
-	const char *objdump_name;
-	char decomp_name[KMOD_DECOMP_LEN];
	int ret;

	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
@@ -244,25 +242,9 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
		state->done[state->done_cnt++] = al.map->start;
	}

-	objdump_name = al.map->dso->long_name;
-	if (dso__needs_decompress(al.map->dso)) {
-		if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
-						 decomp_name,
-						 sizeof(decomp_name)) < 0) {
-			pr_debug("decompression failed\n");
-			return -1;
-		}
-
-		objdump_name = decomp_name;
-	}
-
	/* Read the object code using objdump */
	objdump_addr = map__rip_2objdump(al.map, al.addr);
-	ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
-
-	if (dso__needs_decompress(al.map->dso))
-		unlink(objdump_name);
-
+	ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
	if (ret > 0) {
		/*
		 * The kernel maps are inaccurate - assume objdump is right in
@@ -1270,6 +1270,7 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
	intel_pt_clear_tx_flags(decoder);
	decoder->have_tma = false;
	decoder->cbr = 0;
+	decoder->timestamp_insn_cnt = 0;
	decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
	decoder->overflow = true;
	return -EOVERFLOW;
@@ -1492,6 +1493,7 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
		case INTEL_PT_PSBEND:
			intel_pt_log("ERROR: Missing TIP after FUP\n");
			decoder->pkt_state = INTEL_PT_STATE_ERR3;
+			decoder->pkt_step = 0;
			return -ENOENT;

		case INTEL_PT_OVF:
@@ -2152,14 +2154,6 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
	return &decoder->state;
}

-static bool intel_pt_at_psb(unsigned char *buf, size_t len)
-{
-	if (len < INTEL_PT_PSB_LEN)
-		return false;
-	return memmem(buf, INTEL_PT_PSB_LEN, INTEL_PT_PSB_STR,
-		      INTEL_PT_PSB_LEN);
-}
-
/**
 * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
 * @buf: pointer to buffer pointer
@@ -2248,6 +2242,7 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
 * @buf: buffer
 * @len: size of buffer
 * @tsc: TSC value returned
+ * @rem: returns remaining size when TSC is found
 *
 * Find a TSC packet in @buf and return the TSC value.  This function assumes
 * that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
@@ -2255,7 +2250,8 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
 *
 * Return: %true if TSC is found, false otherwise.
 */
-static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
+static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
+			      size_t *rem)
{
	struct intel_pt_pkt packet;
	int ret;
@@ -2266,6 +2262,7 @@ static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
			return false;
		if (packet.type == INTEL_PT_TSC) {
			*tsc = packet.payload;
+			*rem = len;
			return true;
		}
		if (packet.type == INTEL_PT_PSBEND)
@@ -2316,6 +2313,8 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
 * @len_a: size of first buffer
 * @buf_b: second buffer
 * @len_b: size of second buffer
+ * @consecutive: returns true if there is data in buf_b that is consecutive
+ *               to buf_a
 *
 * If the trace contains TSC we can look at the last TSC of @buf_a and the
 * first TSC of @buf_b in order to determine if the buffers overlap, and then
@@ -2328,33 +2327,41 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
						size_t len_a,
						unsigned char *buf_b,
-						size_t len_b)
+						size_t len_b, bool *consecutive)
{
	uint64_t tsc_a, tsc_b;
	unsigned char *p;
-	size_t len;
+	size_t len, rem_a, rem_b;

	p = intel_pt_last_psb(buf_a, len_a);
	if (!p)
		return buf_b; /* No PSB in buf_a => no overlap */

	len = len_a - (p - buf_a);
-	if (!intel_pt_next_tsc(p, len, &tsc_a)) {
+	if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
		/* The last PSB+ in buf_a is incomplete, so go back one more */
		len_a -= len;
		p = intel_pt_last_psb(buf_a, len_a);
		if (!p)
			return buf_b; /* No full PSB+ => assume no overlap */
		len = len_a - (p - buf_a);
-		if (!intel_pt_next_tsc(p, len, &tsc_a))
+		if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
			return buf_b; /* No TSC in buf_a => assume no overlap */
	}

	while (1) {
		/* Ignore PSB+ with no TSC */
-		if (intel_pt_next_tsc(buf_b, len_b, &tsc_b) &&
-		    intel_pt_tsc_cmp(tsc_a, tsc_b) < 0)
-			return buf_b; /* tsc_a < tsc_b => no overlap */
+		if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
+			int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);
+
+			/* Same TSC, so buffers are consecutive */
+			if (!cmp && rem_b >= rem_a) {
+				*consecutive = true;
+				return buf_b + len_b - (rem_b - rem_a);
+			}
+			if (cmp < 0)
+				return buf_b; /* tsc_a < tsc_b => no overlap */
+		}

		if (!intel_pt_step_psb(&buf_b, &len_b))
			return buf_b + len_b; /* No PSB in buf_b => no data */
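The consecutive-buffer arithmetic above is easier to see with numbers: if the PSB+ in buf_a and the one found in buf_b carry the same TSC, the data is contiguous and decoding should resume in buf_b right after the bytes already consumed from buf_a. A small standalone check of the buf_b + len_b - (rem_b - rem_a) resume offset, with made-up sizes:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned char buf_b[64];
	size_t len_b = sizeof(buf_b);
	size_t rem_a = 10;	/* bytes after the TSC still unread in buf_a */
	size_t rem_b = 24;	/* bytes after the same TSC found in buf_b */

	/* TSC sits at offset len_b - rem_b = 40; buf_a already covered
	 * 10 bytes past it, so resume at offset 50 */
	unsigned char *start = buf_b + len_b - (rem_b - rem_a);
	printf("resume at offset %td of %zu\n", start - buf_b, len_b);
	return 0;
}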
@@ -2368,6 +2375,8 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
  * @buf_b: second buffer
  * @len_b: size of second buffer
  * @have_tsc: can use TSC packets to detect overlap
+ * @consecutive: returns true if there is data in buf_b that is consecutive
+ *               to buf_a
  *
  * When trace samples or snapshots are recorded there is the possibility that
  * the data overlaps. Note that, for the purposes of decoding, data is only
@@ -2378,7 +2387,7 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
  */
 unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
 				     unsigned char *buf_b, size_t len_b,
-				     bool have_tsc)
+				     bool have_tsc, bool *consecutive)
 {
 	unsigned char *found;
 
@@ -2390,7 +2399,8 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
 		return buf_b; /* No overlap */
 
 	if (have_tsc) {
-		found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b);
+		found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
+						  consecutive);
 		if (found)
 			return found;
 	}
@@ -2405,28 +2415,16 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
 	}
 
 	/* Now len_b >= len_a */
-	if (len_b > len_a) {
-		/* The leftover buffer 'b' must start at a PSB */
-		while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
-			if (!intel_pt_step_psb(&buf_a, &len_a))
-				return buf_b; /* No overlap */
-		}
-	}
-
 	while (1) {
 		/* Potential overlap so check the bytes */
 		found = memmem(buf_a, len_a, buf_b, len_a);
-		if (found)
-			return buf_b + len_a;
+		if (found) {
+			*consecutive = true;
+			return buf_b + len_a;
+		}
 
 		/* Try again at next PSB in buffer 'a' */
 		if (!intel_pt_step_psb(&buf_a, &len_a))
 			return buf_b; /* No overlap */
-
-		/* The leftover buffer 'b' must start at a PSB */
-		while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
-			if (!intel_pt_step_psb(&buf_a, &len_a))
-				return buf_b; /* No overlap */
-		}
 	}
 }
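
Without usable TSCs the fallback is a byte comparison: with len_a trimmed
down to at most len_b, memmem(buf_a, len_a, buf_b, len_a) effectively asks
whether the last len_a bytes of the old buffer equal the first len_a bytes
of the new one. A standalone demo (GNU memmem, so _GNU_SOURCE; the buffer
contents are invented):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned char buf_a[] = "....PSB-xyz-";	/* older buffer */
		unsigned char buf_b[] = "PSB-xyz-NEW!";	/* newer buffer */
		size_t len_a = 8;	/* window after stepping to a PSB */

		/* Do the last len_a bytes of A equal the first len_a of B? */
		if (memmem(buf_a + 4, len_a, buf_b, len_a))
			printf("overlap: new data at buf_b + %zu\n", len_a);
		else
			printf("no overlap\n");
		return 0;
	}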
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -102,7 +102,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder);
 
 unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
 				     unsigned char *buf_b, size_t len_b,
-				     bool have_tsc);
+				     bool have_tsc, bool *consecutive);
 
 int intel_pt__strerror(int code, char *buf, size_t buflen);
 
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -125,6 +125,7 @@ struct intel_pt_queue {
 	bool stop;
 	bool step_through_buffers;
 	bool use_buffer_pid_tid;
+	bool sync_switch;
 	pid_t pid, tid;
 	int cpu;
 	int switch_state;
@@ -188,14 +189,17 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
 				   struct auxtrace_buffer *b)
 {
+	bool consecutive = false;
 	void *start;
 
 	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
-				      pt->have_tsc);
+				      pt->have_tsc, &consecutive);
 	if (!start)
 		return -EINVAL;
 	b->use_size = b->data + b->size - start;
 	b->use_data = start;
+	if (b->use_size && consecutive)
+		b->consecutive = true;
 	return 0;
 }
 
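
The overlap fix-up publishes its result through the buffer's use_data and
use_size fields, and now also marks the buffer consecutive when the
trimmed region really continues the previous one. A simplified sketch of
how a consumer might honour the trimmed view (the struct and helper are
stand-ins, not the perf API):

	#include <stddef.h>

	struct buf_view {	/* simplified auxtrace_buffer stand-in */
		void *data, *use_data;
		size_t size, use_size;
	};

	static void decode_window(const struct buf_view *b,
				  void **data, size_t *len)
	{
		/* Prefer the trimmed window when the fix-up set one */
		*data = b->use_data ? b->use_data : b->data;
		*len = b->use_data ? b->use_size : b->size;
	}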
@@ -849,10 +853,12 @@ static int intel_pt_setup_queue(struct intel_pt *pt,
 			if (pt->timeless_decoding || !pt->have_sched_switch)
 				ptq->use_buffer_pid_tid = true;
 		}
+
+		ptq->sync_switch = pt->sync_switch;
 	}
 
 	if (!ptq->on_heap &&
-	    (!pt->sync_switch ||
+	    (!ptq->sync_switch ||
 	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
 		const struct intel_pt_state *state;
 		int ret;
@@ -1235,7 +1241,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
 	if (pt->synth_opts.last_branch)
 		intel_pt_update_last_branch_rb(ptq);
 
-	if (!pt->sync_switch)
+	if (!ptq->sync_switch)
 		return 0;
 
 	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
@@ -1316,6 +1322,21 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
 	return switch_ip;
 }
 
+static void intel_pt_enable_sync_switch(struct intel_pt *pt)
+{
+	unsigned int i;
+
+	pt->sync_switch = true;
+
+	for (i = 0; i < pt->queues.nr_queues; i++) {
+		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
+		struct intel_pt_queue *ptq = queue->priv;
+
+		if (ptq)
+			ptq->sync_switch = true;
+	}
+}
+
 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
 {
 	const struct intel_pt_state *state = ptq->state;
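
Making sync_switch per-queue means a decode error on one CPU's queue can
drop just that queue back to non-synchronized decoding instead of
disabling switch-event synchronization globally. The pattern, reduced to a
standalone sketch (types simplified and hypothetical):

	#include <stdbool.h>
	#include <stddef.h>

	struct queue { bool sync_switch; };
	struct trace {
		bool sync_switch;
		struct queue *queues;
		size_t nr_queues;
	};

	static void enable_sync_switch(struct trace *t)
	{
		size_t i;

		t->sync_switch = true;	/* new queues inherit this */
		for (i = 0; i < t->nr_queues; i++)
			t->queues[i].sync_switch = true;
	}

	static void on_decode_error(struct queue *q)
	{
		q->sync_switch = false;	/* only this queue falls back */
	}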
@@ -1332,7 +1353,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
 		if (pt->switch_ip) {
 			intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
 				     pt->switch_ip, pt->ptss_ip);
-			pt->sync_switch = true;
+			intel_pt_enable_sync_switch(pt);
 		}
 	}
 }
@@ -1348,9 +1369,9 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
 	if (state->err) {
 		if (state->err == INTEL_PT_ERR_NODATA)
 			return 1;
-		if (pt->sync_switch &&
+		if (ptq->sync_switch &&
 		    state->from_ip >= pt->kernel_start) {
-			pt->sync_switch = false;
+			ptq->sync_switch = false;
 			intel_pt_next_tid(pt, ptq);
 		}
 		if (pt->synth_opts.errors) {
@@ -1376,7 +1397,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
 				     state->timestamp, state->est_timestamp);
 			ptq->timestamp = state->est_timestamp;
 		/* Use estimated TSC in unknown switch state */
-		} else if (pt->sync_switch &&
+		} else if (ptq->sync_switch &&
 			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
 			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
 			   ptq->next_tid == -1) {
@@ -1523,7 +1544,7 @@ static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
 		return 1;
 
 	ptq = intel_pt_cpu_to_ptq(pt, cpu);
-	if (!ptq)
+	if (!ptq || !ptq->sync_switch)
 		return 1;
 
 	switch (ptq->switch_state) {