Merge android-4.4.169 (dfca92b) into msm-4.4

* refs/heads/tmp-dfca92b
  Linux 4.4.169
  ALSA: isa/wavefront: prevent some out of bound writes
  rtc: snvs: Add timeouts to avoid kernel lockups
  rtc: snvs: add a missing write sync
  i2c: scmi: Fix probe error on devices with an empty SMB0001 ACPI device node
  i2c: axxia: properly handle master timeout
  cifs: In Kconfig CONFIG_CIFS_POSIX needs depends on legacy (insecure cifs)
  ARM: 8814/1: mm: improve/fix ARM v7_dma_inv_range() unaligned address handling
  mv88e6060: disable hardware level MAC learning
  libata: whitelist all SAMSUNG MZ7KM* solid-state disks
  Input: omap-keypad - fix keyboard debounce configuration
  clk: mmp: Off by one in mmp_clk_add()
  ide: pmac: add of_node_put()
  drivers/tty: add missing of_node_put()
  drivers/sbus/char: add of_node_put()
  sbus: char: add of_node_put()
  SUNRPC: Fix a potential race in xprt_connect()
  bonding: fix 802.3ad state sent to partner when unbinding slave
  ARC: io.h: Implement reads{x}()/writes{x}()
  drm/msm: Grab a vblank reference when waiting for commit_done
  x86/earlyprintk/efi: Fix infinite loop on some screen widths
  scsi: vmw_pscsi: Rearrange code to avoid multiple calls to free_irq during unload
  scsi: libiscsi: Fix NULL pointer dereference in iscsi_eh_session_reset
  mac80211_hwsim: fix module init error paths for netlink
  mac80211: Fix condition validating WMM IE
  mac80211: don't WARN on bad WMM parameters from buggy APs
  f2fs: fix a panic caused by NULL flush_cmd_control
  Revert "drm/rockchip: Allow driver to be shutdown on reboot/kexec"
  powerpc/msi: Fix NULL pointer access in teardown code
  tracing: Fix memory leak of instance function hash filters
  tracing: Fix memory leak in set_trigger_filter()
  MMC: OMAP: fix broken MMC on OMAP15XX/OMAP5910/OMAP310
  aio: fix spectre gadget in lookup_ioctx
  pinctrl: sunxi: a83t: Fix IRQ offset typo for PH11
  powerpc/boot: Fix random libfdt related build errors
  timer/debug: Change /proc/timer_list from 0444 to 0400
  lib/interval_tree_test.c: allow users to limit scope of endpoint
  lib/rbtree-test: lower default params
  lib/rbtree_test.c: make input module parameters
  lib/interval_tree_test.c: allow full tree search
  lib/interval_tree_test.c: make test options module parameters
  ANDROID: Revert fs/squashfs back to linux-4.4.y

Conflicts:
	drivers/gpu/drm/msm/msm_atomic.c

Change-Id: Iecec05c300fb06c0bcdd44a797795e854ea0d0fd
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
commit 52be7fe1fa
Author: Srinivasarao P <spathi@codeaurora.org>
Date:   2018-12-24 12:22:31 +05:30

52 changed files with 1012 additions and 1046 deletions

Makefile

@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 168
SUBLEVEL = 169
EXTRAVERSION =
NAME = Blurry Fish Butt

arch/arc/include/asm/io.h

@ -12,6 +12,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
@ -85,6 +86,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
return w;
}
/*
* {read,write}s{b,w,l}() repeatedly access the same IO address in
* native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
* @count times
*/
#define __raw_readsx(t,f) \
static inline void __raw_reads##f(const volatile void __iomem *addr, \
void *ptr, unsigned int count) \
{ \
bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
u##t *buf = ptr; \
\
if (!count) \
return; \
\
/* Some ARC CPU's don't support unaligned accesses */ \
if (is_aligned) { \
do { \
u##t x = __raw_read##f(addr); \
*buf++ = x; \
} while (--count); \
} else { \
do { \
u##t x = __raw_read##f(addr); \
put_unaligned(x, buf++); \
} while (--count); \
} \
}
#define __raw_readsb __raw_readsb
__raw_readsx(8, b)
#define __raw_readsw __raw_readsw
__raw_readsx(16, w)
#define __raw_readsl __raw_readsl
__raw_readsx(32, l)
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
@ -117,6 +154,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
}
#define __raw_writesx(t,f) \
static inline void __raw_writes##f(volatile void __iomem *addr, \
const void *ptr, unsigned int count) \
{ \
bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
const u##t *buf = ptr; \
\
if (!count) \
return; \
\
/* Some ARC CPU's don't support unaligned accesses */ \
if (is_aligned) { \
do { \
__raw_write##f(*buf++, addr); \
} while (--count); \
} else { \
do { \
__raw_write##f(get_unaligned(buf++), addr); \
} while (--count); \
} \
}
#define __raw_writesb __raw_writesb
__raw_writesx(8, b)
#define __raw_writesw __raw_writesw
__raw_writesx(16, w)
#define __raw_writesl __raw_writesl
__raw_writesx(32, l)
/*
* MMIO can also get buffered/optimized in micro-arch, so barriers needed
* Based on ARM model for the typical use case
@ -132,10 +198,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
#define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); })
#define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); })
#define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); })
#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
#define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); })
#define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); })
#define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); })
/*
* Relaxed API for drivers which can handle barrier ordering themselves
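For context, a minimal usage sketch of the new string-IO helpers (the base pointer and FIFO offsets below are illustrative, not part of this patch):

	u32 buf[16];

	/* 16 repeated 32-bit reads from, then writes to, a fixed FIFO
	 * address; the alignment test above routes odd-aligned buffers
	 * through get_unaligned()/put_unaligned() automatically.
	 */
	readsl(base + RX_FIFO, buf, 16);
	writesl(base + TX_FIFO, buf, 16);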

arch/arm/mm/cache-v7.S

@ -359,14 +359,16 @@ ENTRY(v7_dma_inv_range)
ALT_UP(W(nop))
#endif
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
addne r0, r0, r2
tst r1, r3
bic r1, r1, r3
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line
1:
mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line
add r0, r0, r2
cmp r0, r1
1:
mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line
addlo r0, r0, r2
cmplo r0, r1
blo 1b
dsb st
ret lr
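In short: r2 holds the D-cache line size and r3 the line mask. Partial lines at either end of the range are cleaned as well as invalidated so neighbouring data sharing the line is not lost; the fix additionally steps the start address past an unaligned first line once it has been handled (addne), and predicates the invalidate loop (mcrlo/addlo/cmplo) so an already-handled line is never invalidated a second time without a clean.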

arch/powerpc/boot/Makefile

@ -70,7 +70,8 @@ $(addprefix $(obj)/,$(zlib) cuboot-c2k.o gunzip_util.o main.o): \
libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
libfdtheader := fdt.h libfdt.h libfdt_internal.h
$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o): \
$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o \
treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
$(addprefix $(obj)/,$(libfdtheader))
src-wlib-y := string.S crt0.S crtsavres.S stdio.c main.c \

arch/powerpc/kernel/msi.c

@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
{
struct pci_controller *phb = pci_bus_to_host(dev->bus);
phb->controller_ops.teardown_msi_irqs(dev);
/*
* We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
* so check the pointer again.
*/
if (phb->controller_ops.teardown_msi_irqs)
phb->controller_ops.teardown_msi_irqs(dev);
}

arch/x86/platform/efi/early_printk.c

@ -179,7 +179,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
num--;
}
if (efi_x >= si->lfb_width) {
if (efi_x + font->width > si->lfb_width) {
efi_x = 0;
efi_y += font->height;
}

drivers/ata/libata-core.c

@ -4297,6 +4297,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
/*

drivers/clk/mmp/clk.c

@ -182,7 +182,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
pr_err("CLK %d has invalid pointer %p\n", id, clk);
return;
}
if (id > unit->nr_clks) {
if (id >= unit->nr_clks) {
pr_err("CLK %d is invalid\n", id);
return;
}
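The clock table holds nr_clks entries indexed 0 through nr_clks - 1, so an id equal to nr_clks is already out of bounds; the old `>` comparison accepted it and allowed a one-slot overrun.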

drivers/gpu/drm/msm/msm_atomic.c

@ -95,8 +95,13 @@ static void msm_atomic_wait_for_commit_done(
if (old_state->legacy_cursor_update)
continue;
if (drm_crtc_vblank_get(crtc))
continue;
if (kms->funcs->wait_for_crtc_commit_done)
kms->funcs->wait_for_crtc_commit_done(kms, crtc);
drm_crtc_vblank_put(crtc);
}
}

drivers/gpu/drm/rockchip/rockchip_drm_drv.c

@ -547,11 +547,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
return 0;
}
static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
{
rockchip_drm_platform_remove(pdev);
}
static const struct of_device_id rockchip_drm_dt_ids[] = {
{ .compatible = "rockchip,display-subsystem", },
{ /* sentinel */ },
@ -561,7 +556,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
.shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,

drivers/i2c/busses/i2c-axxia.c

@ -74,8 +74,7 @@
MST_STATUS_ND)
#define MST_STATUS_ERR (MST_STATUS_NAK | \
MST_STATUS_AL | \
MST_STATUS_IP | \
MST_STATUS_TSS)
MST_STATUS_IP)
#define MST_TX_BYTES_XFRD 0x50
#define MST_RX_BYTES_XFRD 0x54
#define SCL_HIGH_PERIOD 0x80
@ -241,7 +240,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev)
*/
if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) {
idev->msg_err = -EPROTO;
i2c_int_disable(idev, ~0);
i2c_int_disable(idev, ~MST_STATUS_TSS);
complete(&idev->msg_complete);
break;
}
@ -299,14 +298,19 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
if (status & MST_STATUS_SCC) {
/* Stop completed */
i2c_int_disable(idev, ~0);
i2c_int_disable(idev, ~MST_STATUS_TSS);
complete(&idev->msg_complete);
} else if (status & MST_STATUS_SNS) {
/* Transfer done */
i2c_int_disable(idev, ~0);
i2c_int_disable(idev, ~MST_STATUS_TSS);
if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
axxia_i2c_empty_rx_fifo(idev);
complete(&idev->msg_complete);
} else if (status & MST_STATUS_TSS) {
/* Transfer timeout */
idev->msg_err = -ETIMEDOUT;
i2c_int_disable(idev, ~MST_STATUS_TSS);
complete(&idev->msg_complete);
} else if (unlikely(status & MST_STATUS_ERR)) {
/* Transfer error */
i2c_int_disable(idev, ~0);
@ -339,10 +343,10 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
u32 rx_xfer, tx_xfer;
u32 addr_1, addr_2;
unsigned long time_left;
unsigned int wt_value;
idev->msg = msg;
idev->msg_xfrd = 0;
idev->msg_err = 0;
reinit_completion(&idev->msg_complete);
if (i2c_m_ten(msg)) {
@ -382,9 +386,18 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
else if (axxia_i2c_fill_tx_fifo(idev) != 0)
int_mask |= MST_STATUS_TFL;
wt_value = WT_VALUE(readl(idev->base + WAIT_TIMER_CONTROL));
/* Disable wait timer temporarly */
writel(wt_value, idev->base + WAIT_TIMER_CONTROL);
/* Check if timeout error happened */
if (idev->msg_err)
goto out;
/* Start manual mode */
writel(CMD_MANUAL, idev->base + MST_COMMAND);
writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL);
i2c_int_enable(idev, int_mask);
time_left = wait_for_completion_timeout(&idev->msg_complete,
@ -395,13 +408,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
dev_warn(idev->dev, "busy after xfer\n");
if (time_left == 0)
if (time_left == 0) {
idev->msg_err = -ETIMEDOUT;
if (idev->msg_err == -ETIMEDOUT)
i2c_recover_bus(&idev->adapter);
axxia_i2c_init(idev);
}
if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO)
out:
if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO &&
idev->msg_err != -ETIMEDOUT)
axxia_i2c_init(idev);
return idev->msg_err;
@ -409,7 +424,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
static int axxia_i2c_stop(struct axxia_i2c_dev *idev)
{
u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC;
u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS;
unsigned long time_left;
reinit_completion(&idev->msg_complete);
@ -436,6 +451,9 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
int i;
int ret = 0;
idev->msg_err = 0;
i2c_int_enable(idev, MST_STATUS_TSS);
for (i = 0; ret == 0 && i < num; ++i)
ret = axxia_i2c_xfer_msg(idev, &msgs[i]);
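Taken together, the axxia changes turn the controller's wait timer into a transfer watchdog: blanket interrupt disables now spare the TSS bit (~MST_STATUS_TSS instead of ~0), a TSS interrupt fails the transfer with -ETIMEDOUT before manual mode is even started, and a software timeout additionally runs i2c_recover_bus() and re-initialises the controller.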

drivers/i2c/busses/i2c-scmi.c

@ -364,6 +364,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
{
struct acpi_smbus_cmi *smbus_cmi;
const struct acpi_device_id *id;
int ret;
smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
if (!smbus_cmi)
@ -385,8 +386,10 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
if (smbus_cmi->cap_info == 0)
if (smbus_cmi->cap_info == 0) {
ret = -ENODEV;
goto err;
}
snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
"SMBus CMI adapter %s",
@ -397,7 +400,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
smbus_cmi->adapter.dev.parent = &device->dev;
if (i2c_add_adapter(&smbus_cmi->adapter)) {
ret = i2c_add_adapter(&smbus_cmi->adapter);
if (ret) {
dev_err(&device->dev, "Couldn't register adapter!\n");
goto err;
}
@ -407,7 +411,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
err:
kfree(smbus_cmi);
device->driver_data = NULL;
return -EIO;
return ret;
}
static int acpi_smbus_cmi_remove(struct acpi_device *device)

drivers/ide/pmac.c

@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
struct device_node *root = of_find_node_by_path("/");
const char *model = of_get_property(root, "model", NULL);
of_node_put(root);
/* Get cable type from device-tree. */
if (cable && !strncmp(cable, "80-", 3)) {
/* Some drives fail to detect 80c cable in PowerBook */

drivers/input/keyboard/omap4-keypad.c

@ -60,8 +60,18 @@
/* OMAP4 values */
#define OMAP4_VAL_IRQDISABLE 0x0
#define OMAP4_VAL_DEBOUNCINGTIME 0x7
#define OMAP4_VAL_PVT 0x7
/*
* Errata i689: If a key is released for a time shorter than debounce time,
* the keyboard will idle and never detect the key release. The workaround
* is to use at least a 12ms debounce time. See omap5432 TRM chapter
* "26.4.6.2 Keyboard Controller Timer" for more information.
*/
#define OMAP4_KEYPAD_PTV_DIV_128 0x6
#define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv) \
((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)
#define OMAP4_VAL_DEBOUNCINGTIME_16MS \
OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128)
enum {
KBD_REVISION_OMAP4 = 0,
@ -181,9 +191,9 @@ static int omap4_keypad_open(struct input_dev *input)
kbd_writel(keypad_data, OMAP4_KBD_CTRL,
OMAP4_DEF_CTRL_NOSOFTMODE |
(OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT));
(OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT));
kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME,
OMAP4_VAL_DEBOUNCINGTIME);
OMAP4_VAL_DEBOUNCINGTIME_16MS);
/* clear pending interrupts */
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
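Worked through with kernel integer arithmetic, the macro gives 3 for the 16 ms setting: 1000000 / 32768 = 30, (1 << (6 + 1)) * 30 = 3840, (16 * 1000) / 3840 = 4, and 4 - 1 = 3. The hardware then debounces for (3 + 1) * 128 / 32768 s ≈ 15.6 ms, safely above the 12 ms minimum the i689 erratum requires.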

drivers/mmc/host/omap.c

@ -105,6 +105,7 @@ struct mmc_omap_slot {
unsigned int vdd;
u16 saved_con;
u16 bus_mode;
u16 power_mode;
unsigned int fclk_freq;
struct tasklet_struct cover_tasklet;
@ -1156,7 +1157,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct mmc_omap_slot *slot = mmc_priv(mmc);
struct mmc_omap_host *host = slot->host;
int i, dsor;
int clk_enabled;
int clk_enabled, init_stream;
mmc_omap_select_slot(slot, 0);
@ -1166,6 +1167,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
slot->vdd = ios->vdd;
clk_enabled = 0;
init_stream = 0;
switch (ios->power_mode) {
case MMC_POWER_OFF:
mmc_omap_set_power(slot, 0, ios->vdd);
@ -1173,13 +1175,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_UP:
/* Cannot touch dsor yet, just power up MMC */
mmc_omap_set_power(slot, 1, ios->vdd);
slot->power_mode = ios->power_mode;
goto exit;
case MMC_POWER_ON:
mmc_omap_fclk_enable(host, 1);
clk_enabled = 1;
dsor |= 1 << 11;
if (slot->power_mode != MMC_POWER_ON)
init_stream = 1;
break;
}
slot->power_mode = ios->power_mode;
if (slot->bus_mode != ios->bus_mode) {
if (slot->pdata->set_bus_mode != NULL)
@ -1195,7 +1201,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
for (i = 0; i < 2; i++)
OMAP_MMC_WRITE(host, CON, dsor);
slot->saved_con = dsor;
if (ios->power_mode == MMC_POWER_ON) {
if (init_stream) {
/* worst case at 400kHz, 80 cycles makes 200 microsecs */
int usecs = 250;
@ -1233,6 +1239,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
slot->host = host;
slot->mmc = mmc;
slot->id = id;
slot->power_mode = MMC_POWER_UNDEFINED;
slot->pdata = &host->pdata->slots[id];
host->slots[id] = slot;

drivers/net/bonding/bond_3ad.c

@ -2011,6 +2011,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
aggregator->aggregator_identifier);
/* Tell the partner that this port is not suitable for aggregation */
port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
__update_lacpdu_from_port(port);
ad_lacpdu_send(port);
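Previously only AD_STATE_SYNCHRONIZATION was cleared, so the final LACPDU sent to the partner could still advertise the port as collecting, distributing and aggregatable; clearing all four actor state bits makes the departure unambiguous.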

drivers/net/dsa/mv88e6060.c

@ -98,8 +98,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
/* Reset the switch. */
REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
GLOBAL_ATU_CONTROL_SWRESET |
GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
GLOBAL_ATU_CONTROL_LEARNDIS);
/* Wait up to one second for reset to complete. */
timeout = jiffies + 1 * HZ;
@ -124,13 +123,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
*/
REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
/* Enable automatic address learning, set the address
* database size to 1024 entries, and set the default aging
* time to 5 minutes.
/* Disable automatic address learning.
*/
REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
GLOBAL_ATU_CONTROL_LEARNDIS);
return 0;
}

drivers/net/wireless/mac80211_hwsim.c

@ -3195,16 +3195,16 @@ static int __init init_mac80211_hwsim(void)
if (err)
return err;
err = hwsim_init_netlink();
if (err)
goto out_unregister_driver;
hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
if (IS_ERR(hwsim_class)) {
err = PTR_ERR(hwsim_class);
goto out_unregister_driver;
goto out_exit_netlink;
}
err = hwsim_init_netlink();
if (err < 0)
goto out_unregister_driver;
for (i = 0; i < radios; i++) {
struct hwsim_new_radio_params param = { 0 };
@ -3310,6 +3310,8 @@ out_free_mon:
free_netdev(hwsim_mon);
out_free_radios:
mac80211_hwsim_free();
out_exit_netlink:
hwsim_exit_netlink();
out_unregister_driver:
platform_driver_unregister(&mac80211_hwsim_driver);
return err;

drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c

@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PH_EINT11 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */
};
static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {

drivers/rtc/rtc-snvs.c

@ -47,49 +47,83 @@ struct snvs_rtc_data {
struct clk *clk;
};
/* Read 64 bit timer register, which could be in inconsistent state */
static u64 rtc_read_lpsrt(struct snvs_rtc_data *data)
{
u32 msb, lsb;
regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &msb);
regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &lsb);
return (u64)msb << 32 | lsb;
}
/* Read the secure real time counter, taking care to deal with the cases of the
* counter updating while being read.
*/
static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
{
u64 read1, read2;
u32 val;
unsigned int timeout = 100;
/* As expected, the registers might update between the read of the LSB
* reg and the MSB reg. It's also possible that one register might be
* in partially modified state as well.
*/
read1 = rtc_read_lpsrt(data);
do {
regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
read1 = val;
read1 <<= 32;
regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
read1 |= val;
regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
read2 = val;
read2 <<= 32;
regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
read2 |= val;
} while (read1 != read2);
read2 = read1;
read1 = rtc_read_lpsrt(data);
} while (read1 != read2 && --timeout);
if (!timeout)
dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
/* Convert 47-bit counter to 32-bit raw second count */
return (u32) (read1 >> CNTR_TO_SECS_SH);
}
static void rtc_write_sync_lp(struct snvs_rtc_data *data)
/* Just read the lsb from the counter, dealing with inconsistent state */
static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb)
{
u32 count1, count2, count3;
int i;
u32 count1, count2;
unsigned int timeout = 100;
/* Wait for 3 CKIL cycles */
for (i = 0; i < 3; i++) {
do {
regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
} while (count1 != count2);
/* Now wait until counter value changes */
do {
do {
regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count3);
} while (count2 != count3);
} while (count3 == count1);
regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
do {
count2 = count1;
regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
} while (count1 != count2 && --timeout);
if (!timeout) {
dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
return -ETIMEDOUT;
}
*lsb = count1;
return 0;
}
static int rtc_write_sync_lp(struct snvs_rtc_data *data)
{
u32 count1, count2;
u32 elapsed;
unsigned int timeout = 1000;
int ret;
ret = rtc_read_lp_counter_lsb(data, &count1);
if (ret)
return ret;
/* Wait for 3 CKIL cycles, about 61.0-91.5 µs */
do {
ret = rtc_read_lp_counter_lsb(data, &count2);
if (ret)
return ret;
elapsed = count2 - count1; /* wrap around _is_ handled! */
} while (elapsed < 3 && --timeout);
if (!timeout) {
dev_err(&data->rtc->dev, "Timeout waiting for LPSRT Counter to change\n");
return -ETIMEDOUT;
}
return 0;
}
static int snvs_rtc_enable(struct snvs_rtc_data *data, bool enable)
@ -173,9 +207,7 @@ static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
(SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN),
enable ? (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN) : 0);
rtc_write_sync_lp(data);
return 0;
return rtc_write_sync_lp(data);
}
static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@ -183,10 +215,14 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct snvs_rtc_data *data = dev_get_drvdata(dev);
struct rtc_time *alrm_tm = &alrm->time;
unsigned long time;
int ret;
rtc_tm_to_time(alrm_tm, &time);
regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0);
ret = rtc_write_sync_lp(data);
if (ret)
return ret;
regmap_write(data->regmap, data->offset + SNVS_LPTAR, time);
/* Clear alarm interrupt status bit */
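Read together, the new helpers bound what used to be unbounded spinning: rtc_read_lpsrt() samples the 47-bit MSB/LSB pair once, rtc_read_lp_counter() accepts a value only when two consecutive samples agree and gives up after 100 tries, and rtc_write_sync_lp() waits for the LSB to advance by three counts under its own timeout, so a stuck SRTC now surfaces as -ETIMEDOUT instead of a kernel lockup.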

drivers/sbus/char/display7seg.c

@ -221,6 +221,7 @@ static int d7s_probe(struct platform_device *op)
dev_set_drvdata(&op->dev, p);
d7s_device = p;
err = 0;
of_node_put(opts);
out:
return err;

drivers/sbus/char/envctrl.c

@ -910,8 +910,10 @@ static void envctrl_init_i2c_child(struct device_node *dp,
for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) {
pchild->mon_type[len] = ENVCTRL_NOMON;
}
of_node_put(root_node);
return;
}
of_node_put(root_node);
}
/* Get the monitor channels. */

drivers/scsi/libiscsi.c

@ -2416,8 +2416,8 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
failed:
ISCSI_DBG_EH(session,
"failing session reset: Could not log back into "
"%s, %s [age %d]\n", session->targetname,
conn->persistent_address, session->age);
"%s [age %d]\n", session->targetname,
session->age);
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
return FAILED;

drivers/scsi/vmw_pvscsi.c

@ -1199,8 +1199,6 @@ static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
{
pvscsi_shutdown_intr(adapter);
if (adapter->workqueue)
destroy_workqueue(adapter->workqueue);
@ -1529,6 +1527,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out_reset_adapter:
ll_adapter_reset(adapter);
out_release_resources:
pvscsi_shutdown_intr(adapter);
pvscsi_release_resources(adapter);
scsi_host_put(host);
out_disable_device:
@ -1537,6 +1536,7 @@ out_disable_device:
return error;
out_release_resources_and_disable:
pvscsi_shutdown_intr(adapter);
pvscsi_release_resources(adapter);
goto out_disable_device;
}

drivers/tty/serial/suncore.c

@ -111,6 +111,7 @@ void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
mode = of_get_property(dp, mode_prop, NULL);
if (!mode)
mode = "9600,8,n,1,-";
of_node_put(dp);
}
cflag = CREAD | HUPCL | CLOCAL;

fs/aio.c

@ -40,6 +40,7 @@
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
#include <linux/nospec.h>
#include <asm/kmap_types.h>
#include <asm/uaccess.h>
@ -1064,6 +1065,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
if (!table || id >= table->nr)
goto out;
id = array_index_nospec(id, table->nr);
ctx = rcu_dereference(table->table[id]);
if (ctx && ctx->user_id == ctx_id) {
if (percpu_ref_tryget_live(&ctx->users))
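The fix is an instance of a general pattern: bounds-check an attacker-influenced index, then clamp it so it cannot exceed the bound even under speculative execution. A minimal sketch with hypothetical names:

	#include <linux/nospec.h>

	static struct foo *foo_lookup(struct foo **table, unsigned long nr,
				      unsigned long id)
	{
		if (id >= nr)
			return NULL;
		id = array_index_nospec(id, nr);	/* forces id into [0, nr) */
		return table[id];
	}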

fs/cifs/Kconfig

@ -111,7 +111,7 @@ config CIFS_XATTR
config CIFS_POSIX
bool "CIFS POSIX Extensions"
depends on CIFS_XATTR
depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
help
Enabling this option will cause the cifs client to attempt to
negotiate a newer dialect with servers, such as Samba 3.0.5

fs/squashfs/Kconfig

@ -25,6 +25,34 @@ config SQUASHFS
If unsure, say N.
choice
prompt "File decompression options"
depends on SQUASHFS
help
Squashfs now supports two options for decompressing file
data. Traditionally Squashfs has decompressed into an
intermediate buffer and then memcopied it into the page cache.
Squashfs now supports the ability to decompress directly into
the page cache.
If unsure, select "Decompress file data into an intermediate buffer"
config SQUASHFS_FILE_CACHE
bool "Decompress file data into an intermediate buffer"
help
Decompress file data into an intermediate buffer and then
memcopy it into the page cache.
config SQUASHFS_FILE_DIRECT
bool "Decompress files directly into the page cache"
help
Directly decompress file data into the page cache.
Doing so can significantly improve performance because
it eliminates a memcpy and it also removes the lock contention
on the single buffer.
endchoice
choice
prompt "Decompressor parallelisation options"
depends on SQUASHFS
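With the revert in place, a configuration fragment selecting the restored cache-based read path looks like this (illustrative; the option names are the ones declared above):

	CONFIG_SQUASHFS=y
	CONFIG_SQUASHFS_FILE_CACHE=y
	# CONFIG_SQUASHFS_FILE_DIRECT is not set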

fs/squashfs/Makefile

@ -5,7 +5,8 @@
obj-$(CONFIG_SQUASHFS) += squashfs.o
squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
squashfs-y += namei.o super.o symlink.o decompressor.o
squashfs-y += file_direct.o page_actor.o
squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o
squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o

fs/squashfs/block.c

@ -28,12 +28,9 @@
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
@ -41,381 +38,45 @@
#include "decompressor.h"
#include "page_actor.h"
static struct workqueue_struct *squashfs_read_wq;
struct squashfs_read_request {
struct super_block *sb;
u64 index;
int length;
int compressed;
int offset;
u64 read_end;
struct squashfs_page_actor *output;
enum {
SQUASHFS_COPY,
SQUASHFS_DECOMPRESS,
SQUASHFS_METADATA,
} data_processing;
bool synchronous;
/*
* If the read is synchronous, it is possible to retrieve information
* about the request by setting these pointers.
*/
int *res;
int *bytes_read;
int *bytes_uncompressed;
int nr_buffers;
struct buffer_head **bh;
struct work_struct offload;
};
struct squashfs_bio_request {
struct buffer_head **bh;
int nr_buffers;
};
static int squashfs_bio_submit(struct squashfs_read_request *req);
int squashfs_init_read_wq(void)
/*
* Read the metadata block length, this is stored in the first two
* bytes of the metadata block.
*/
static struct buffer_head *get_block_length(struct super_block *sb,
u64 *cur_index, int *offset, int *length)
{
squashfs_read_wq = create_workqueue("SquashFS read wq");
return !!squashfs_read_wq;
}
void squashfs_destroy_read_wq(void)
{
flush_workqueue(squashfs_read_wq);
destroy_workqueue(squashfs_read_wq);
}
static void free_read_request(struct squashfs_read_request *req, int error)
{
if (!req->synchronous)
squashfs_page_actor_free(req->output, error);
if (req->res)
*(req->res) = error;
kfree(req->bh);
kfree(req);
}
static void squashfs_process_blocks(struct squashfs_read_request *req)
{
int error = 0;
int bytes, i, length;
struct squashfs_sb_info *msblk = req->sb->s_fs_info;
struct squashfs_page_actor *actor = req->output;
struct buffer_head **bh = req->bh;
int nr_buffers = req->nr_buffers;
for (i = 0; i < nr_buffers; ++i) {
if (!bh[i])
continue;
wait_on_buffer(bh[i]);
if (!buffer_uptodate(bh[i]))
error = -EIO;
}
if (error)
goto cleanup;
if (req->data_processing == SQUASHFS_METADATA) {
/* Extract the length of the metadata block */
if (req->offset != msblk->devblksize - 1) {
length = le16_to_cpup((__le16 *)
(bh[0]->b_data + req->offset));
} else {
length = (unsigned char)bh[0]->b_data[req->offset];
length |= (unsigned char)bh[1]->b_data[0] << 8;
}
req->compressed = SQUASHFS_COMPRESSED(length);
req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
: SQUASHFS_COPY;
length = SQUASHFS_COMPRESSED_SIZE(length);
if (req->index + length + 2 > req->read_end) {
for (i = 0; i < nr_buffers; ++i)
put_bh(bh[i]);
kfree(bh);
req->length = length;
req->index += 2;
squashfs_bio_submit(req);
return;
}
req->length = length;
req->offset = (req->offset + 2) % PAGE_SIZE;
if (req->offset < 2) {
put_bh(bh[0]);
++bh;
--nr_buffers;
}
}
if (req->bytes_read)
*(req->bytes_read) = req->length;
if (req->data_processing == SQUASHFS_COPY) {
squashfs_bh_to_actor(bh, nr_buffers, req->output, req->offset,
req->length, msblk->devblksize);
} else if (req->data_processing == SQUASHFS_DECOMPRESS) {
req->length = squashfs_decompress(msblk, bh, nr_buffers,
req->offset, req->length, actor);
if (req->length < 0) {
error = -EIO;
goto cleanup;
}
}
/* Last page may have trailing bytes not filled */
bytes = req->length % PAGE_SIZE;
if (bytes && actor->page[actor->pages - 1])
zero_user_segment(actor->page[actor->pages - 1], bytes,
PAGE_SIZE);
cleanup:
if (req->bytes_uncompressed)
*(req->bytes_uncompressed) = req->length;
if (error) {
for (i = 0; i < nr_buffers; ++i)
if (bh[i])
put_bh(bh[i]);
}
free_read_request(req, error);
}
static void read_wq_handler(struct work_struct *work)
{
squashfs_process_blocks(container_of(work,
struct squashfs_read_request, offload));
}
static void squashfs_bio_end_io(struct bio *bio)
{
int i;
int error = bio->bi_error;
struct squashfs_bio_request *bio_req = bio->bi_private;
bio_put(bio);
for (i = 0; i < bio_req->nr_buffers; ++i) {
if (!bio_req->bh[i])
continue;
if (!error)
set_buffer_uptodate(bio_req->bh[i]);
else
clear_buffer_uptodate(bio_req->bh[i]);
unlock_buffer(bio_req->bh[i]);
}
kfree(bio_req);
}
static int bh_is_optional(struct squashfs_read_request *req, int idx)
{
int start_idx, end_idx;
struct squashfs_sb_info *msblk = req->sb->s_fs_info;
start_idx = (idx * msblk->devblksize - req->offset) >> PAGE_SHIFT;
end_idx = ((idx + 1) * msblk->devblksize - req->offset + 1) >> PAGE_SHIFT;
if (start_idx >= req->output->pages)
return 1;
if (start_idx < 0)
start_idx = end_idx;
if (end_idx >= req->output->pages)
end_idx = start_idx;
return !req->output->page[start_idx] && !req->output->page[end_idx];
}
static int actor_getblks(struct squashfs_read_request *req, u64 block)
{
int i;
req->bh = kmalloc_array(req->nr_buffers, sizeof(*(req->bh)), GFP_NOIO);
if (!req->bh)
return -ENOMEM;
for (i = 0; i < req->nr_buffers; ++i) {
/*
* When dealing with an uncompressed block, the actor may
* contains NULL pages. There's no need to read the buffers
* associated with these pages.
*/
if (!req->compressed && bh_is_optional(req, i)) {
req->bh[i] = NULL;
continue;
}
req->bh[i] = sb_getblk(req->sb, block + i);
if (!req->bh[i]) {
while (--i) {
if (req->bh[i])
put_bh(req->bh[i]);
}
return -1;
}
}
return 0;
}
static int squashfs_bio_submit(struct squashfs_read_request *req)
{
struct bio *bio = NULL;
struct squashfs_sb_info *msblk = sb->s_fs_info;
struct buffer_head *bh;
struct squashfs_bio_request *bio_req = NULL;
int b = 0, prev_block = 0;
struct squashfs_sb_info *msblk = req->sb->s_fs_info;
u64 read_start = round_down(req->index, msblk->devblksize);
u64 read_end = round_up(req->index + req->length, msblk->devblksize);
sector_t block = read_start >> msblk->devblksize_log2;
sector_t block_end = read_end >> msblk->devblksize_log2;
int offset = read_start - round_down(req->index, PAGE_SIZE);
int nr_buffers = block_end - block;
int blksz = msblk->devblksize;
int bio_max_pages = nr_buffers > BIO_MAX_PAGES ? BIO_MAX_PAGES
: nr_buffers;
bh = sb_bread(sb, *cur_index);
if (bh == NULL)
return NULL;
/* Setup the request */
req->read_end = read_end;
req->offset = req->index - read_start;
req->nr_buffers = nr_buffers;
if (actor_getblks(req, block) < 0)
goto getblk_failed;
if (msblk->devblksize - *offset == 1) {
*length = (unsigned char) bh->b_data[*offset];
put_bh(bh);
bh = sb_bread(sb, ++(*cur_index));
if (bh == NULL)
return NULL;
*length |= (unsigned char) bh->b_data[0] << 8;
*offset = 1;
} else {
*length = (unsigned char) bh->b_data[*offset] |
(unsigned char) bh->b_data[*offset + 1] << 8;
*offset += 2;
/* Create and submit the BIOs */
for (b = 0; b < nr_buffers; ++b, offset += blksz) {
bh = req->bh[b];
if (!bh || !trylock_buffer(bh))
continue;
if (buffer_uptodate(bh)) {
unlock_buffer(bh);
continue;
if (*offset == msblk->devblksize) {
put_bh(bh);
bh = sb_bread(sb, ++(*cur_index));
if (bh == NULL)
return NULL;
*offset = 0;
}
offset %= PAGE_SIZE;
/* Append the buffer to the current BIO if it is contiguous */
if (bio && bio_req && prev_block + 1 == b) {
if (bio_add_page(bio, bh->b_page, blksz, offset)) {
bio_req->nr_buffers += 1;
prev_block = b;
continue;
}
}
/* Otherwise, submit the current BIO and create a new one */
if (bio)
submit_bio(READ, bio);
bio_req = kcalloc(1, sizeof(struct squashfs_bio_request),
GFP_NOIO);
if (!bio_req)
goto req_alloc_failed;
bio_req->bh = &req->bh[b];
bio = bio_alloc(GFP_NOIO, bio_max_pages);
if (!bio)
goto bio_alloc_failed;
bio->bi_bdev = req->sb->s_bdev;
bio->bi_iter.bi_sector = (block + b)
<< (msblk->devblksize_log2 - 9);
bio->bi_private = bio_req;
bio->bi_end_io = squashfs_bio_end_io;
bio_add_page(bio, bh->b_page, blksz, offset);
bio_req->nr_buffers += 1;
prev_block = b;
}
if (bio)
submit_bio(READ, bio);
if (req->synchronous)
squashfs_process_blocks(req);
else {
INIT_WORK(&req->offload, read_wq_handler);
schedule_work(&req->offload);
}
return 0;
bio_alloc_failed:
kfree(bio_req);
req_alloc_failed:
unlock_buffer(bh);
while (--nr_buffers >= b)
if (req->bh[nr_buffers])
put_bh(req->bh[nr_buffers]);
while (--b >= 0)
if (req->bh[b])
wait_on_buffer(req->bh[b]);
getblk_failed:
free_read_request(req, -ENOMEM);
return -ENOMEM;
return bh;
}
static int read_metadata_block(struct squashfs_read_request *req,
u64 *next_index)
{
int ret, error, bytes_read = 0, bytes_uncompressed = 0;
struct squashfs_sb_info *msblk = req->sb->s_fs_info;
if (req->index + 2 > msblk->bytes_used) {
free_read_request(req, -EINVAL);
return -EINVAL;
}
req->length = 2;
/* Do not read beyond the end of the device */
if (req->index + req->length > msblk->bytes_used)
req->length = msblk->bytes_used - req->index;
req->data_processing = SQUASHFS_METADATA;
/*
* Reading metadata is always synchronous because we don't know the
* length in advance and the function is expected to update
* 'next_index' and return the length.
*/
req->synchronous = true;
req->res = &error;
req->bytes_read = &bytes_read;
req->bytes_uncompressed = &bytes_uncompressed;
TRACE("Metadata block @ 0x%llx, %scompressed size %d, src size %d\n",
req->index, req->compressed ? "" : "un", bytes_read,
req->output->length);
ret = squashfs_bio_submit(req);
if (ret)
return ret;
if (error)
return error;
if (next_index)
*next_index += 2 + bytes_read;
return bytes_uncompressed;
}
static int read_data_block(struct squashfs_read_request *req, int length,
u64 *next_index, bool synchronous)
{
int ret, error = 0, bytes_uncompressed = 0, bytes_read = 0;
req->compressed = SQUASHFS_COMPRESSED_BLOCK(length);
req->length = length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
: SQUASHFS_COPY;
req->synchronous = synchronous;
if (synchronous) {
req->res = &error;
req->bytes_read = &bytes_read;
req->bytes_uncompressed = &bytes_uncompressed;
}
TRACE("Data block @ 0x%llx, %scompressed size %d, src size %d\n",
req->index, req->compressed ? "" : "un", req->length,
req->output->length);
ret = squashfs_bio_submit(req);
if (ret)
return ret;
if (synchronous)
ret = error ? error : bytes_uncompressed;
if (next_index)
*next_index += length;
return ret;
}
/*
* Read and decompress a metadata block or datablock. Length is non-zero
@ -426,50 +87,130 @@ static int read_data_block(struct squashfs_read_request *req, int length,
* generated a larger block - this does occasionally happen with compression
* algorithms).
*/
static int __squashfs_read_data(struct super_block *sb, u64 index, int length,
u64 *next_index, struct squashfs_page_actor *output, bool sync)
{
struct squashfs_read_request *req;
req = kcalloc(1, sizeof(struct squashfs_read_request), GFP_KERNEL);
if (!req) {
if (!sync)
squashfs_page_actor_free(output, -ENOMEM);
return -ENOMEM;
}
req->sb = sb;
req->index = index;
req->output = output;
if (next_index)
*next_index = index;
if (length)
length = read_data_block(req, length, next_index, sync);
else
length = read_metadata_block(req, next_index);
if (length < 0) {
ERROR("squashfs_read_data failed to read block 0x%llx\n",
(unsigned long long)index);
return -EIO;
}
return length;
}
int squashfs_read_data(struct super_block *sb, u64 index, int length,
u64 *next_index, struct squashfs_page_actor *output)
u64 *next_index, struct squashfs_page_actor *output)
{
return __squashfs_read_data(sb, index, length, next_index, output,
true);
}
struct squashfs_sb_info *msblk = sb->s_fs_info;
struct buffer_head **bh;
int offset = index & ((1 << msblk->devblksize_log2) - 1);
u64 cur_index = index >> msblk->devblksize_log2;
int bytes, compressed, b = 0, k = 0, avail, i;
int squashfs_read_data_async(struct super_block *sb, u64 index, int length,
u64 *next_index, struct squashfs_page_actor *output)
{
bh = kcalloc(((output->length + msblk->devblksize - 1)
>> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
if (bh == NULL)
return -ENOMEM;
return __squashfs_read_data(sb, index, length, next_index, output,
false);
if (length) {
/*
* Datablock.
*/
bytes = -offset;
compressed = SQUASHFS_COMPRESSED_BLOCK(length);
length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
if (next_index)
*next_index = index + length;
TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
index, compressed ? "" : "un", length, output->length);
if (length < 0 || length > output->length ||
(index + length) > msblk->bytes_used)
goto read_failure;
for (b = 0; bytes < length; b++, cur_index++) {
bh[b] = sb_getblk(sb, cur_index);
if (bh[b] == NULL)
goto block_release;
bytes += msblk->devblksize;
}
ll_rw_block(READ, b, bh);
} else {
/*
* Metadata block.
*/
if ((index + 2) > msblk->bytes_used)
goto read_failure;
bh[0] = get_block_length(sb, &cur_index, &offset, &length);
if (bh[0] == NULL)
goto read_failure;
b = 1;
bytes = msblk->devblksize - offset;
compressed = SQUASHFS_COMPRESSED(length);
length = SQUASHFS_COMPRESSED_SIZE(length);
if (next_index)
*next_index = index + length + 2;
TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
compressed ? "" : "un", length);
if (length < 0 || length > output->length ||
(index + length) > msblk->bytes_used)
goto block_release;
for (; bytes < length; b++) {
bh[b] = sb_getblk(sb, ++cur_index);
if (bh[b] == NULL)
goto block_release;
bytes += msblk->devblksize;
}
ll_rw_block(READ, b - 1, bh + 1);
}
for (i = 0; i < b; i++) {
wait_on_buffer(bh[i]);
if (!buffer_uptodate(bh[i]))
goto block_release;
}
if (compressed) {
if (!msblk->stream)
goto read_failure;
length = squashfs_decompress(msblk, bh, b, offset, length,
output);
if (length < 0)
goto read_failure;
} else {
/*
* Block is uncompressed.
*/
int in, pg_offset = 0;
void *data = squashfs_first_page(output);
for (bytes = length; k < b; k++) {
in = min(bytes, msblk->devblksize - offset);
bytes -= in;
while (in) {
if (pg_offset == PAGE_CACHE_SIZE) {
data = squashfs_next_page(output);
pg_offset = 0;
}
avail = min_t(int, in, PAGE_CACHE_SIZE -
pg_offset);
memcpy(data + pg_offset, bh[k]->b_data + offset,
avail);
in -= avail;
pg_offset += avail;
offset += avail;
}
offset = 0;
put_bh(bh[k]);
}
squashfs_finish_page(output);
}
kfree(bh);
return length;
block_release:
for (; k < b; k++)
put_bh(bh[k]);
read_failure:
ERROR("squashfs_read_data failed to read block 0x%llx\n",
(unsigned long long) index);
kfree(bh);
return -EIO;
}

fs/squashfs/cache.c

@ -209,14 +209,17 @@ void squashfs_cache_put(struct squashfs_cache_entry *entry)
*/
void squashfs_cache_delete(struct squashfs_cache *cache)
{
int i;
int i, j;
if (cache == NULL)
return;
for (i = 0; i < cache->entries; i++) {
if (cache->entry[i].page)
free_page_array(cache->entry[i].page, cache->pages);
if (cache->entry[i].data) {
for (j = 0; j < cache->pages; j++)
kfree(cache->entry[i].data[j]);
kfree(cache->entry[i].data);
}
kfree(cache->entry[i].actor);
}
@ -233,7 +236,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
int block_size)
{
int i;
int i, j;
struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (cache == NULL) {
@ -265,13 +268,22 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
init_waitqueue_head(&cache->entry[i].wait_queue);
entry->cache = cache;
entry->block = SQUASHFS_INVALID_BLK;
entry->page = alloc_page_array(cache->pages, GFP_KERNEL);
if (!entry->page) {
entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
if (entry->data == NULL) {
ERROR("Failed to allocate %s cache entry\n", name);
goto cleanup;
}
entry->actor = squashfs_page_actor_init(entry->page,
cache->pages, 0, NULL);
for (j = 0; j < cache->pages; j++) {
entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
if (entry->data[j] == NULL) {
ERROR("Failed to allocate %s buffer\n", name);
goto cleanup;
}
}
entry->actor = squashfs_page_actor_init(entry->data,
cache->pages, 0);
if (entry->actor == NULL) {
ERROR("Failed to allocate %s cache entry\n", name);
goto cleanup;
@ -302,20 +314,18 @@ int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
return min(length, entry->length - offset);
while (offset < entry->length) {
void *buff = kmap_atomic(entry->page[offset / PAGE_CACHE_SIZE])
+ (offset % PAGE_CACHE_SIZE);
void *buff = entry->data[offset / PAGE_CACHE_SIZE]
+ (offset % PAGE_CACHE_SIZE);
int bytes = min_t(int, entry->length - offset,
PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE));
if (bytes >= remaining) {
memcpy(buffer, buff, remaining);
kunmap_atomic(buff);
remaining = 0;
break;
}
memcpy(buffer, buff, bytes);
kunmap_atomic(buff);
buffer += bytes;
remaining -= bytes;
offset += bytes;
@ -409,38 +419,43 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
void *squashfs_read_table(struct super_block *sb, u64 block, int length)
{
int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
struct page **page;
void *buff;
int res;
int i, res;
void *table, *buffer, **data;
struct squashfs_page_actor *actor;
page = alloc_page_array(pages, GFP_KERNEL);
if (!page)
table = buffer = kmalloc(length, GFP_KERNEL);
if (table == NULL)
return ERR_PTR(-ENOMEM);
actor = squashfs_page_actor_init(page, pages, length, NULL);
if (actor == NULL) {
data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
if (data == NULL) {
res = -ENOMEM;
goto failed;
}
actor = squashfs_page_actor_init(data, pages, length);
if (actor == NULL) {
res = -ENOMEM;
goto failed2;
}
for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
data[i] = buffer;
res = squashfs_read_data(sb, block, length |
SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);
if (res < 0)
goto failed2;
kfree(data);
kfree(actor);
buff = kmalloc(length, GFP_KERNEL);
if (!buff)
goto failed2;
squashfs_actor_to_buf(actor, buff, length);
squashfs_page_actor_free(actor, 0);
free_page_array(page, pages);
return buff;
if (res < 0)
goto failed;
return table;
failed2:
squashfs_page_actor_free(actor, 0);
kfree(data);
failed:
free_page_array(page, pages);
kfree(table);
return ERR_PTR(res);
}

fs/squashfs/decompressor.c

@ -24,8 +24,7 @@
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
@ -95,44 +94,40 @@ const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
static void *get_comp_opts(struct super_block *sb, unsigned short flags)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
void *comp_opts, *buffer = NULL;
struct page *page;
void *buffer = NULL, *comp_opts;
struct squashfs_page_actor *actor = NULL;
int length = 0;
if (!SQUASHFS_COMP_OPTS(flags))
return squashfs_comp_opts(msblk, buffer, length);
/*
* Read decompressor specific options from file system if present
*/
if (SQUASHFS_COMP_OPTS(flags)) {
buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
if (buffer == NULL) {
comp_opts = ERR_PTR(-ENOMEM);
goto out;
}
page = alloc_page(GFP_KERNEL);
if (!page)
return ERR_PTR(-ENOMEM);
actor = squashfs_page_actor_init(&buffer, 1, 0);
if (actor == NULL) {
comp_opts = ERR_PTR(-ENOMEM);
goto out;
}
actor = squashfs_page_actor_init(&page, 1, 0, NULL);
if (actor == NULL) {
comp_opts = ERR_PTR(-ENOMEM);
goto actor_error;
length = squashfs_read_data(sb,
sizeof(struct squashfs_super_block), 0, NULL, actor);
if (length < 0) {
comp_opts = ERR_PTR(length);
goto out;
}
}
length = squashfs_read_data(sb,
sizeof(struct squashfs_super_block), 0, NULL, actor);
if (length < 0) {
comp_opts = ERR_PTR(length);
goto read_error;
}
buffer = kmap_atomic(page);
comp_opts = squashfs_comp_opts(msblk, buffer, length);
kunmap_atomic(buffer);
read_error:
squashfs_page_actor_free(actor, 0);
actor_error:
__free_page(page);
out:
kfree(actor);
kfree(buffer);
return comp_opts;
}

fs/squashfs/file.c

@ -47,16 +47,12 @@
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/mm_inline.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
// Backported from 4.5
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
/*
* Locate cache slot in range [offset, index] for specified inode. If
* there's more than one return the slot closest to index.
@ -446,21 +442,6 @@ static int squashfs_readpage_fragment(struct page *page)
return res;
}
static int squashfs_readpages_fragment(struct page *page,
struct list_head *readahead_pages, struct address_space *mapping)
{
if (!page) {
page = lru_to_page(readahead_pages);
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping, page->index,
mapping_gfp_constraint(mapping, GFP_KERNEL))) {
put_page(page);
return 0;
}
}
return squashfs_readpage_fragment(page);
}
static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
{
struct inode *inode = page->mapping->host;
@ -473,105 +454,54 @@ static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
return 0;
}
static int squashfs_readpages_sparse(struct page *page,
struct list_head *readahead_pages, int index, int file_end,
struct address_space *mapping)
{
if (!page) {
page = lru_to_page(readahead_pages);
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping, page->index,
mapping_gfp_constraint(mapping, GFP_KERNEL))) {
put_page(page);
return 0;
}
}
return squashfs_readpage_sparse(page, index, file_end);
}
static int __squashfs_readpages(struct file *file, struct page *page,
struct list_head *readahead_pages, unsigned int nr_pages,
struct address_space *mapping)
{
struct inode *inode = mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
int file_end = i_size_read(inode) >> msblk->block_log;
int res;
do {
struct page *cur_page = page ? page
: lru_to_page(readahead_pages);
int page_index = cur_page->index;
int index = page_index >> (msblk->block_log - PAGE_CACHE_SHIFT);
if (page_index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT))
return 1;
if (index < file_end || squashfs_i(inode)->fragment_block ==
SQUASHFS_INVALID_BLK) {
u64 block = 0;
int bsize = read_blocklist(inode, index, &block);
if (bsize < 0)
return -1;
if (bsize == 0) {
res = squashfs_readpages_sparse(page,
readahead_pages, index, file_end,
mapping);
} else {
res = squashfs_readpages_block(page,
readahead_pages, &nr_pages, mapping,
page_index, block, bsize);
}
} else {
res = squashfs_readpages_fragment(page,
readahead_pages, mapping);
}
if (res)
return 0;
page = NULL;
} while (readahead_pages && !list_empty(readahead_pages));
return 0;
}
static int squashfs_readpage(struct file *file, struct page *page)
{
int ret;
struct inode *inode = page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
int file_end = i_size_read(inode) >> msblk->block_log;
int res;
void *pageaddr;
TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
page->index, squashfs_i(page->mapping->host)->start);
page->index, squashfs_i(inode)->start);
get_page(page);
if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT))
goto out;
ret = __squashfs_readpages(file, page, NULL, 1, page->mapping);
if (ret) {
flush_dcache_page(page);
if (ret < 0)
SetPageError(page);
if (index < file_end || squashfs_i(inode)->fragment_block ==
SQUASHFS_INVALID_BLK) {
u64 block = 0;
int bsize = read_blocklist(inode, index, &block);
if (bsize < 0)
goto error_out;
if (bsize == 0)
res = squashfs_readpage_sparse(page, index, file_end);
else
SetPageUptodate(page);
zero_user_segment(page, 0, PAGE_CACHE_SIZE);
unlock_page(page);
put_page(page);
}
res = squashfs_readpage_block(page, block, bsize);
} else
res = squashfs_readpage_fragment(page);
return 0;
}
if (!res)
return 0;
error_out:
SetPageError(page);
out:
pageaddr = kmap_atomic(page);
memset(pageaddr, 0, PAGE_CACHE_SIZE);
kunmap_atomic(pageaddr);
flush_dcache_page(page);
if (!PageError(page))
SetPageUptodate(page);
unlock_page(page);
static int squashfs_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned int nr_pages)
{
TRACE("Entered squashfs_readpages, %u pages, first page index %lx\n",
nr_pages, lru_to_page(pages)->index);
__squashfs_readpages(file, NULL, pages, nr_pages, mapping);
return 0;
}
const struct address_space_operations squashfs_aops = {
.readpage = squashfs_readpage,
.readpages = squashfs_readpages,
.readpage = squashfs_readpage
};

fs/squashfs/file_cache.c (new file, 38 lines)

@ -0,0 +1,38 @@
/*
* Copyright (c) 2013
* Phillip Lougher <phillip@squashfs.org.uk>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*/
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
/* Read separately compressed datablock and memcopy into page cache */
int squashfs_readpage_block(struct page *page, u64 block, int bsize)
{
struct inode *i = page->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
block, bsize);
int res = buffer->error;
if (res)
ERROR("Unable to read page, block %llx, size %x\n", block,
bsize);
else
squashfs_copy_cache(page, buffer, buffer->length, 0);
squashfs_cache_put(buffer);
return res;
}

fs/squashfs/file_direct.c

@ -13,7 +13,6 @@
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/mm_inline.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
@ -21,139 +20,157 @@
#include "squashfs.h"
#include "page_actor.h"
// Backported from 4.5
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
int pages, struct page **page);
/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
static void release_actor_pages(struct page **page, int pages, int error)
{
int i;
struct inode *inode = target_page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
for (i = 0; i < pages; i++) {
if (!page[i])
continue;
flush_dcache_page(page[i]);
if (!error)
SetPageUptodate(page[i]);
else {
SetPageError(page[i]);
zero_user_segment(page[i], 0, PAGE_CACHE_SIZE);
}
unlock_page(page[i]);
put_page(page[i]);
}
kfree(page);
}
/*
- * Create a "page actor" which will kmap and kunmap the
- * page cache pages appropriately within the decompressor
- */
-static struct squashfs_page_actor *actor_from_page_cache(
-    unsigned int actor_pages, struct page *target_page,
-    struct list_head *rpages, unsigned int *nr_pages, int start_index,
-    struct address_space *mapping)
-{
-    struct page **page;
-    struct squashfs_page_actor *actor;
-    int i, n;
-    gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
-
-    page = kmalloc_array(actor_pages, sizeof(void *), GFP_KERNEL);
-    if (!page)
-        return NULL;
-
-    for (i = 0, n = start_index; i < actor_pages; i++, n++) {
-        if (target_page == NULL && rpages && !list_empty(rpages)) {
-            struct page *cur_page = lru_to_page(rpages);
-
-            if (cur_page->index < start_index + actor_pages) {
-                list_del(&cur_page->lru);
-                --(*nr_pages);
-                if (add_to_page_cache_lru(cur_page, mapping,
-                                          cur_page->index, gfp))
-                    put_page(cur_page);
-                else
-                    target_page = cur_page;
-            } else
-                rpages = NULL;
-        }
-
-        if (target_page && target_page->index == n) {
-            page[i] = target_page;
-            target_page = NULL;
-        } else {
-            page[i] = grab_cache_page_nowait(mapping, n);
-            if (page[i] == NULL)
-                continue;
-        }
-
-        if (PageUptodate(page[i])) {
-            unlock_page(page[i]);
-            put_page(page[i]);
-            page[i] = NULL;
-        }
-    }
-
-    actor = squashfs_page_actor_init(page, actor_pages, 0,
-                                     release_actor_pages);
-    if (!actor) {
-        release_actor_pages(page, actor_pages, -ENOMEM);
-        kfree(page);
-        return NULL;
-    }
-
-    return actor;
-}
-
-int squashfs_readpages_block(struct page *target_page,
-                             struct list_head *readahead_pages,
-                             unsigned int *nr_pages,
-                             struct address_space *mapping,
-                             int page_index, u64 block, int bsize)
-{
-    struct squashfs_page_actor *actor;
-    struct inode *inode = mapping->host;
-    struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-    int start_index, end_index, file_end, actor_pages, res;
-    int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
-
-    /*
-     * If readpage() is called on an uncompressed datablock, we can just
-     * read the pages instead of fetching the whole block.
-     * This greatly improves the performance when a process keep doing
-     * random reads because we only fetch the necessary data.
-     * The readahead algorithm will take care of doing speculative reads
-     * if necessary.
-     * We can't read more than 1 block even if readahead provides use more
-     * pages because we don't know yet if the next block is compressed or
-     * not.
-     */
-    if (bsize && !SQUASHFS_COMPRESSED_BLOCK(bsize)) {
-        u64 block_end = block + msblk->block_size;
-
-        block += (page_index & mask) * PAGE_CACHE_SIZE;
-        actor_pages = (block_end - block) / PAGE_CACHE_SIZE;
-        if (*nr_pages < actor_pages)
-            actor_pages = *nr_pages;
-        start_index = page_index;
-        bsize = min_t(int, bsize, (PAGE_CACHE_SIZE * actor_pages)
-                                  | SQUASHFS_COMPRESSED_BIT_BLOCK);
-    } else {
-        file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
-        start_index = page_index & ~mask;
-        end_index = start_index | mask;
-        if (end_index > file_end)
-            end_index = file_end;
-        actor_pages = end_index - start_index + 1;
-    }
-
-    actor = actor_from_page_cache(actor_pages, target_page,
-                                  readahead_pages, nr_pages, start_index,
-                                  mapping);
-    if (!actor)
-        return -ENOMEM;
-
-    res = squashfs_read_data_async(inode->i_sb, block, bsize, NULL,
-                                   actor);
-    return res < 0 ? res : 0;
-}
+static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
+    int pages, struct page **page);
+
+/* Read separately compressed datablock directly into page cache */
+int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
+{
+    struct inode *inode = target_page->mapping->host;
+    struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+
+    int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+    int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+    int start_index = target_page->index & ~mask;
+    int end_index = start_index | mask;
+    int i, n, pages, missing_pages, bytes, res = -ENOMEM;
+    struct page **page;
+    struct squashfs_page_actor *actor;
+    void *pageaddr;
+
+    if (end_index > file_end)
+        end_index = file_end;
+
+    pages = end_index - start_index + 1;
+
+    page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
+    if (page == NULL)
+        return res;
+
+    /*
+     * Create a "page actor" which will kmap and kunmap the
+     * page cache pages appropriately within the decompressor
+     */
+    actor = squashfs_page_actor_init_special(page, pages, 0);
+    if (actor == NULL)
+        goto out;
+
+    /* Try to grab all the pages covered by the Squashfs block */
+    for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
+        page[i] = (n == target_page->index) ? target_page :
+            grab_cache_page_nowait(target_page->mapping, n);
+
+        if (page[i] == NULL) {
+            missing_pages++;
+            continue;
+        }
+
+        if (PageUptodate(page[i])) {
+            unlock_page(page[i]);
+            page_cache_release(page[i]);
+            page[i] = NULL;
+            missing_pages++;
+        }
+    }
+
+    if (missing_pages) {
+        /*
+         * Couldn't get one or more pages, this page has either
+         * been VM reclaimed, but others are still in the page cache
+         * and uptodate, or we're racing with another thread in
+         * squashfs_readpage also trying to grab them.  Fall back to
+         * using an intermediate buffer.
+         */
+        res = squashfs_read_cache(target_page, block, bsize, pages,
+                                  page);
+        if (res < 0)
+            goto mark_errored;
+
+        goto out;
+    }
+
+    /* Decompress directly into the page cache buffers */
+    res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
+    if (res < 0)
+        goto mark_errored;
+
+    /* Last page may have trailing bytes not filled */
+    bytes = res % PAGE_CACHE_SIZE;
+    if (bytes) {
+        pageaddr = kmap_atomic(page[pages - 1]);
+        memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
+        kunmap_atomic(pageaddr);
+    }
+
+    /* Mark pages as uptodate, unlock and release */
+    for (i = 0; i < pages; i++) {
+        flush_dcache_page(page[i]);
+        SetPageUptodate(page[i]);
+        unlock_page(page[i]);
+        if (page[i] != target_page)
+            page_cache_release(page[i]);
+    }
+
+    kfree(actor);
+    kfree(page);
+
+    return 0;
+
+mark_errored:
+    /* Decompression failed, mark pages as errored.  Target_page is
+     * dealt with by the caller
+     */
+    for (i = 0; i < pages; i++) {
+        if (page[i] == NULL || page[i] == target_page)
+            continue;
+        flush_dcache_page(page[i]);
+        SetPageError(page[i]);
+        unlock_page(page[i]);
+        page_cache_release(page[i]);
+    }
+
+out:
+    kfree(actor);
+    kfree(page);
+    return res;
+}
+
+static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
+    int pages, struct page **page)
+{
+    struct inode *i = target_page->mapping->host;
+    struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
+                                          block, bsize);
+    int bytes = buffer->length, res = buffer->error, n, offset = 0;
+    void *pageaddr;
+
+    if (res) {
+        ERROR("Unable to read page, block %llx, size %x\n", block,
+              bsize);
+        goto out;
+    }
+
+    for (n = 0; n < pages && bytes > 0; n++,
+              bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
+        int avail = min_t(int, bytes, PAGE_CACHE_SIZE);
+
+        if (page[n] == NULL)
+            continue;
+
+        pageaddr = kmap_atomic(page[n]);
+        squashfs_copy_data(pageaddr, buffer, offset, avail);
+        memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
+        kunmap_atomic(pageaddr);
+        flush_dcache_page(page[n]);
+        SetPageUptodate(page[n]);
+        unlock_page(page[n]);
+        if (page[n] != target_page)
+            page_cache_release(page[n]);
+    }
+
+out:
+    squashfs_cache_put(buffer);
+    return res;
+}

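Taken together, the hunk above restores the synchronous readpage path: squashfs_readpage_block() tries to grab every page-cache page covered by the compressed block and decompress into them directly; if any page cannot be grabbed, it falls back to squashfs_read_cache(), which decompresses into a shared cache buffer and copies out page by page. A minimal userspace sketch of that decision follows — grab_page() and the constants are invented stand-ins, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

#define PAGES_PER_BLOCK 4    /* hypothetical: pages covered by one block */
#define PAGE_SZ 4096

/* Hypothetical stand-in for grab_cache_page_nowait(): returns NULL when
 * the page cannot be taken (reclaimed, or locked by a racing reader). */
static char *grab_page(int available)
{
    return available ? calloc(1, PAGE_SZ) : NULL;
}

int main(void)
{
    char *page[PAGES_PER_BLOCK];
    int i, missing_pages = 0;

    /* model a race: page 2 cannot be grabbed */
    for (i = 0; i < PAGES_PER_BLOCK; i++) {
        page[i] = grab_page(i != 2);
        if (page[i] == NULL)
            missing_pages++;
    }

    if (missing_pages)
        /* kernel path: squashfs_read_cache() decompresses into an
         * intermediate buffer and copies out only the grabbed pages */
        puts("fall back to intermediate buffer");
    else
        /* kernel path: decompress directly into the mapped pages */
        puts("decompress directly into the page cache");

    for (i = 0; i < PAGES_PER_BLOCK; i++)
        free(page[i]);
    return 0;
}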
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c

@@ -94,17 +94,39 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
     struct buffer_head **bh, int b, int offset, int length,
     struct squashfs_page_actor *output)
 {
-    int res;
-    size_t dest_len = output->length;
     struct squashfs_lz4 *stream = strm;
+    void *buff = stream->input, *data;
+    int avail, i, bytes = length, res;
+    size_t dest_len = output->length;
+
+    for (i = 0; i < b; i++) {
+        avail = min(bytes, msblk->devblksize - offset);
+        memcpy(buff, bh[i]->b_data + offset, avail);
+        buff += avail;
+        bytes -= avail;
+        offset = 0;
+        put_bh(bh[i]);
+    }
 
-    squashfs_bh_to_buf(bh, b, stream->input, offset, length,
-        msblk->devblksize);
     res = lz4_decompress_unknownoutputsize(stream->input, length,
                     stream->output, &dest_len);
     if (res)
         return -EIO;
-    squashfs_buf_to_actor(stream->output, output, dest_len);
+
+    bytes = dest_len;
+    data = squashfs_first_page(output);
+    buff = stream->output;
+    while (data) {
+        if (bytes <= PAGE_CACHE_SIZE) {
+            memcpy(data, buff, bytes);
+            break;
+        }
+        memcpy(data, buff, PAGE_CACHE_SIZE);
+        buff += PAGE_CACHE_SIZE;
+        bytes -= PAGE_CACHE_SIZE;
+        data = squashfs_next_page(output);
+    }
+    squashfs_finish_page(output);
 
     return dest_len;
 }

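The restored input-gathering loop above (the same loop reappears in the LZO hunk below) concatenates the compressed block out of consecutive device blocks, honouring the starting offset only for the first one. A self-contained userspace model of the pattern; the bh[] contents and sizes here are invented for illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* three fake "buffer heads" of 5 bytes each; the payload starts at
     * offset 2 inside the first one, as in the kernel loop above */
    const char *bh[] = { "..HEL", "LO, W", "ORLD!" };
    int devblksize = 5, b = 3, offset = 2, bytes = 13;
    char input[16], *buff = input;
    int i, avail;

    for (i = 0; i < b; i++) {
        avail = bytes < devblksize - offset ? bytes
                                            : devblksize - offset;
        memcpy(buff, bh[i] + offset, avail);
        buff += avail;
        bytes -= avail;
        offset = 0;    /* only the first block is entered mid-way */
    }
    *buff = '\0';
    printf("%s\n", input);    /* prints: HELLO, WORLD! */
    return 0;
}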
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c

@@ -79,19 +79,45 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
     struct buffer_head **bh, int b, int offset, int length,
     struct squashfs_page_actor *output)
 {
-    int res;
-    size_t out_len = output->length;
     struct squashfs_lzo *stream = strm;
+    void *buff = stream->input, *data;
+    int avail, i, bytes = length, res;
+    size_t out_len = output->length;
+
+    for (i = 0; i < b; i++) {
+        avail = min(bytes, msblk->devblksize - offset);
+        memcpy(buff, bh[i]->b_data + offset, avail);
+        buff += avail;
+        bytes -= avail;
+        offset = 0;
+        put_bh(bh[i]);
+    }
 
-    squashfs_bh_to_buf(bh, b, stream->input, offset, length,
-        msblk->devblksize);
     res = lzo1x_decompress_safe(stream->input, (size_t)length,
                     stream->output, &out_len);
     if (res != LZO_E_OK)
-        return -EIO;
-    squashfs_buf_to_actor(stream->output, output, out_len);
+        goto failed;
 
-    return out_len;
+    res = bytes = (int)out_len;
+    data = squashfs_first_page(output);
+    buff = stream->output;
+    while (data) {
+        if (bytes <= PAGE_CACHE_SIZE) {
+            memcpy(data, buff, bytes);
+            break;
+        } else {
+            memcpy(data, buff, PAGE_CACHE_SIZE);
+            buff += PAGE_CACHE_SIZE;
+            bytes -= PAGE_CACHE_SIZE;
+            data = squashfs_next_page(output);
+        }
+    }
+    squashfs_finish_page(output);
+
+    return res;
+
+failed:
+    return -EIO;
 }
 
 const struct squashfs_decompressor squashfs_lzo_comp_ops = {

diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c

@@ -9,11 +9,79 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/pagemap.h>
-#include <linux/buffer_head.h>
 #include "page_actor.h"
 
-struct squashfs_page_actor *squashfs_page_actor_init(struct page **page,
-    int pages, int length, void (*release_pages)(struct page **, int, int))
+/*
+ * This file contains implementations of page_actor for decompressing into
+ * an intermediate buffer, and for decompressing directly into the
+ * page cache.
+ *
+ * Calling code should avoid sleeping between calls to squashfs_first_page()
+ * and squashfs_finish_page().
+ */
+
+/* Implementation of page_actor for decompressing into intermediate buffer */
+static void *cache_first_page(struct squashfs_page_actor *actor)
+{
+    actor->next_page = 1;
+    return actor->buffer[0];
+}
+
+static void *cache_next_page(struct squashfs_page_actor *actor)
+{
+    if (actor->next_page == actor->pages)
+        return NULL;
+
+    return actor->buffer[actor->next_page++];
+}
+
+static void cache_finish_page(struct squashfs_page_actor *actor)
+{
+    /* empty */
+}
+
+struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
+    int pages, int length)
+{
+    struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+
+    if (actor == NULL)
+        return NULL;
+
+    actor->length = length ? : pages * PAGE_CACHE_SIZE;
+    actor->buffer = buffer;
+    actor->pages = pages;
+    actor->next_page = 0;
+    actor->squashfs_first_page = cache_first_page;
+    actor->squashfs_next_page = cache_next_page;
+    actor->squashfs_finish_page = cache_finish_page;
+    return actor;
+}
+
+/* Implementation of page_actor for decompressing directly into page cache. */
+static void *direct_first_page(struct squashfs_page_actor *actor)
+{
+    actor->next_page = 1;
+    return actor->pageaddr = kmap_atomic(actor->page[0]);
+}
+
+static void *direct_next_page(struct squashfs_page_actor *actor)
+{
+    if (actor->pageaddr)
+        kunmap_atomic(actor->pageaddr);
+
+    return actor->pageaddr = actor->next_page == actor->pages ? NULL :
+        kmap_atomic(actor->page[actor->next_page++]);
+}
+
+static void direct_finish_page(struct squashfs_page_actor *actor)
+{
+    if (actor->pageaddr)
+        kunmap_atomic(actor->pageaddr);
+}
+
+struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
+    int pages, int length)
 {
     struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
 
@@ -25,129 +93,8 @@ struct squashfs_page_actor *squashfs_page_actor_init(struct page **page,
     actor->pages = pages;
     actor->next_page = 0;
     actor->pageaddr = NULL;
-    actor->release_pages = release_pages;
+    actor->squashfs_first_page = direct_first_page;
+    actor->squashfs_next_page = direct_next_page;
+    actor->squashfs_finish_page = direct_finish_page;
     return actor;
 }
-
-void squashfs_page_actor_free(struct squashfs_page_actor *actor, int error)
-{
-    if (!actor)
-        return;
-
-    if (actor->release_pages)
-        actor->release_pages(actor->page, actor->pages, error);
-
-    kfree(actor);
-}
-
-void squashfs_actor_to_buf(struct squashfs_page_actor *actor, void *buf,
-    int length)
-{
-    void *pageaddr;
-    int pos = 0, avail, i;
-
-    for (i = 0; i < actor->pages && pos < length; ++i) {
-        avail = min_t(int, length - pos, PAGE_CACHE_SIZE);
-        if (actor->page[i]) {
-            pageaddr = kmap_atomic(actor->page[i]);
-            memcpy(buf + pos, pageaddr, avail);
-            kunmap_atomic(pageaddr);
-        }
-        pos += avail;
-    }
-}
-
-void squashfs_buf_to_actor(void *buf, struct squashfs_page_actor *actor,
-    int length)
-{
-    void *pageaddr;
-    int pos = 0, avail, i;
-
-    for (i = 0; i < actor->pages && pos < length; ++i) {
-        avail = min_t(int, length - pos, PAGE_CACHE_SIZE);
-        if (actor->page[i]) {
-            pageaddr = kmap_atomic(actor->page[i]);
-            memcpy(pageaddr, buf + pos, avail);
-            kunmap_atomic(pageaddr);
-        }
-        pos += avail;
-    }
-}
-
-void squashfs_bh_to_actor(struct buffer_head **bh, int nr_buffers,
-    struct squashfs_page_actor *actor, int offset, int length, int blksz)
-{
-    void *kaddr = NULL;
-    int bytes = 0, pgoff = 0, b = 0, p = 0, avail, i;
-
-    while (bytes < length) {
-        if (actor->page[p]) {
-            kaddr = kmap_atomic(actor->page[p]);
-            while (pgoff < PAGE_CACHE_SIZE && bytes < length) {
-                avail = min_t(int, blksz - offset,
-                        PAGE_CACHE_SIZE - pgoff);
-                memcpy(kaddr + pgoff, bh[b]->b_data + offset,
-                       avail);
-                pgoff += avail;
-                bytes += avail;
-                offset = (offset + avail) % blksz;
-                if (!offset) {
-                    put_bh(bh[b]);
-                    ++b;
-                }
-            }
-            kunmap_atomic(kaddr);
-            pgoff = 0;
-        } else {
-            for (i = 0; i < PAGE_CACHE_SIZE / blksz; ++i) {
-                if (bh[b])
-                    put_bh(bh[b]);
-                ++b;
-            }
-            bytes += PAGE_CACHE_SIZE;
-        }
-        ++p;
-    }
-}
-
-void squashfs_bh_to_buf(struct buffer_head **bh, int nr_buffers, void *buf,
-    int offset, int length, int blksz)
-{
-    int i, avail, bytes = 0;
-
-    for (i = 0; i < nr_buffers && bytes < length; ++i) {
-        avail = min_t(int, length - bytes, blksz - offset);
-        if (bh[i]) {
-            memcpy(buf + bytes, bh[i]->b_data + offset, avail);
-            put_bh(bh[i]);
-        }
-        bytes += avail;
-        offset = 0;
-    }
-}
-
-void free_page_array(struct page **page, int nr_pages)
-{
-    int i;
-
-    for (i = 0; i < nr_pages; ++i)
-        __free_page(page[i]);
-    kfree(page);
-}
-
-struct page **alloc_page_array(int nr_pages, int gfp_mask)
-{
-    int i;
-    struct page **page;
-
-    page = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
-    if (!page)
-        return NULL;
-
-    for (i = 0; i < nr_pages; ++i) {
-        page[i] = alloc_page(gfp_mask);
-        if (!page[i]) {
-            free_page_array(page, i);
-            return NULL;
-        }
-    }
-    return page;
-}

diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h

@@ -5,61 +5,77 @@
  * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level squashfsory.
+ * the COPYING file in the top-level directory.
  */
 
+#ifndef CONFIG_SQUASHFS_FILE_DIRECT
 struct squashfs_page_actor {
-    struct page **page;
-    void *pageaddr;
+    void **page;
     int pages;
     int length;
     int next_page;
-    void (*release_pages)(struct page **, int, int);
 };
 
-extern struct squashfs_page_actor *squashfs_page_actor_init(struct page **,
-    int, int, void (*)(struct page **, int, int));
-extern void squashfs_page_actor_free(struct squashfs_page_actor *, int);
-
-extern void squashfs_actor_to_buf(struct squashfs_page_actor *, void *, int);
-extern void squashfs_buf_to_actor(void *, struct squashfs_page_actor *, int);
-extern void squashfs_bh_to_actor(struct buffer_head **, int,
-    struct squashfs_page_actor *, int, int, int);
-extern void squashfs_bh_to_buf(struct buffer_head **, int, void *, int, int,
-    int);
+static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
+    int pages, int length)
+{
+    struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+
+    if (actor == NULL)
+        return NULL;
+
+    actor->length = length ? : pages * PAGE_CACHE_SIZE;
+    actor->page = page;
+    actor->pages = pages;
+    actor->next_page = 0;
+    return actor;
+}
 
-/*
- * Calling code should avoid sleeping between calls to squashfs_first_page()
- * and squashfs_finish_page().
- */
 static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
 {
     actor->next_page = 1;
-    return actor->pageaddr = actor->page[0] ? kmap_atomic(actor->page[0])
-                                            : NULL;
+    return actor->page[0];
 }
 
 static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
 {
-    if (!IS_ERR_OR_NULL(actor->pageaddr))
-        kunmap_atomic(actor->pageaddr);
-
-    if (actor->next_page == actor->pages)
-        return actor->pageaddr = ERR_PTR(-ENODATA);
-
-    actor->pageaddr = actor->page[actor->next_page] ?
-        kmap_atomic(actor->page[actor->next_page]) : NULL;
-    ++actor->next_page;
-    return actor->pageaddr;
+    return actor->next_page == actor->pages ? NULL :
+        actor->page[actor->next_page++];
 }
 
 static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
 {
-    if (!IS_ERR_OR_NULL(actor->pageaddr))
-        kunmap_atomic(actor->pageaddr);
+    /* empty */
 }
+#else
+struct squashfs_page_actor {
+    union {
+        void **buffer;
+        struct page **page;
+    };
+    void *pageaddr;
+    void *(*squashfs_first_page)(struct squashfs_page_actor *);
+    void *(*squashfs_next_page)(struct squashfs_page_actor *);
+    void (*squashfs_finish_page)(struct squashfs_page_actor *);
+    int pages;
+    int length;
+    int next_page;
+};
 
-extern struct page **alloc_page_array(int, int);
-extern void free_page_array(struct page **, int);
+extern struct squashfs_page_actor *squashfs_page_actor_init(void **, int, int);
+extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page
+                             **, int, int);
+static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
+{
+    return actor->squashfs_first_page(actor);
+}
+static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
+{
+    return actor->squashfs_next_page(actor);
+}
+static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
+{
+    actor->squashfs_finish_page(actor);
+}
+#endif
 #endif

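The header above defines the whole page-actor contract: a decompressor obtains destination buffers one at a time through squashfs_first_page()/squashfs_next_page() and signals completion with squashfs_finish_page(); in the direct variant these hand out kmap_atomic() mappings, which is why callers must not sleep in between. A minimal userspace model of the intermediate-buffer variant (simplified types, invented main(), not the kernel code):

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

struct page_actor {        /* simplified model of squashfs_page_actor */
    void **buffer;
    int pages;
    int next_page;
};

static void *first_page(struct page_actor *a)
{
    a->next_page = 1;
    return a->buffer[0];
}

static void *next_page(struct page_actor *a)    /* NULL when exhausted */
{
    return a->next_page == a->pages ? NULL : a->buffer[a->next_page++];
}

int main(void)
{
    static char p0[PAGE_SZ], p1[PAGE_SZ];
    void *bufs[] = { p0, p1 };
    struct page_actor actor = { bufs, 2, 0 };
    const char *out = "decompressed";    /* pretend decompressor output */
    void *dst = first_page(&actor);
    int n = 0;

    while (dst != NULL) {        /* one destination page at a time */
        memcpy(dst, out, strlen(out) + 1);
        dst = next_page(&actor);
        n++;
    }
    printf("filled %d pages\n", n);    /* prints: filled 2 pages */
    return 0;
}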
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h

@@ -28,14 +28,8 @@
 #define WARNING(s, args...) pr_warn("SQUASHFS: "s, ## args)
 
 /* block.c */
-extern int squashfs_init_read_wq(void);
-extern void squashfs_destroy_read_wq(void);
 extern int squashfs_read_data(struct super_block *, u64, int, u64 *,
                 struct squashfs_page_actor *);
-extern int squashfs_read_data_async(struct super_block *, u64, int, u64 *,
-                struct squashfs_page_actor *);
 
 /* cache.c */
 extern struct squashfs_cache *squashfs_cache_init(char *, int, int);
@@ -76,9 +70,8 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
 void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
                 int);
 
-/* file_direct.c */
-extern int squashfs_readpages_block(struct page *, struct list_head *,
-    unsigned int *, struct address_space *, int, u64, int);
+/* file_xxx.c */
+extern int squashfs_readpage_block(struct page *, u64, int);
 
 /* id.c */
 extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);

diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h

@@ -49,7 +49,7 @@ struct squashfs_cache_entry {
     int num_waiters;
     wait_queue_head_t wait_queue;
     struct squashfs_cache *cache;
-    struct page **page;
+    void **data;
     struct squashfs_page_actor *actor;
 };

diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c

@@ -445,15 +445,9 @@ static int __init init_squashfs_fs(void)
     if (err)
         return err;
 
-    if (!squashfs_init_read_wq()) {
-        destroy_inodecache();
-        return -ENOMEM;
-    }
-
     err = register_filesystem(&squashfs_fs_type);
     if (err) {
         destroy_inodecache();
-        squashfs_destroy_read_wq();
         return err;
     }
@@ -467,7 +461,6 @@ static void __exit exit_squashfs_fs(void)
 {
     unregister_filesystem(&squashfs_fs_type);
     destroy_inodecache();
-    squashfs_destroy_read_wq();
 }

diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c

@@ -55,7 +55,7 @@ static void *squashfs_xz_comp_opts(struct squashfs_sb_info *msblk,
     struct comp_opts *opts;
     int err = 0, n;
 
-    opts = kmalloc(sizeof(*opts), GFP_ATOMIC);
+    opts = kmalloc(sizeof(*opts), GFP_KERNEL);
     if (opts == NULL) {
         err = -ENOMEM;
         goto out2;
@@ -136,7 +136,6 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
     enum xz_ret xz_err;
     int avail, total = 0, k = 0;
     struct squashfs_xz *stream = strm;
-    void *buf = NULL;
 
     xz_dec_reset(stream->state);
     stream->buf.in_pos = 0;
@@ -157,20 +156,12 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
         if (stream->buf.out_pos == stream->buf.out_size) {
             stream->buf.out = squashfs_next_page(output);
-            if (!IS_ERR(stream->buf.out)) {
+            if (stream->buf.out != NULL) {
                 stream->buf.out_pos = 0;
                 total += PAGE_CACHE_SIZE;
             }
         }
 
-        if (!stream->buf.out) {
-            if (!buf) {
-                buf = kmalloc(PAGE_CACHE_SIZE, GFP_ATOMIC);
-                if (!buf)
-                    goto out;
-            }
-            stream->buf.out = buf;
-        }
-
         xz_err = xz_dec_run(stream->state, &stream->buf);
 
         if (stream->buf.in_pos == stream->buf.in_size && k < b)
@@ -182,13 +173,11 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
     if (xz_err != XZ_STREAM_END || k < b)
         goto out;
 
-    kfree(buf);
     return total + stream->buf.out_pos;
 
 out:
     for (; k < b; k++)
         put_bh(bh[k]);
 
-    kfree(buf);
     return -EIO;
 }

diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c

@@ -66,7 +66,6 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
     struct buffer_head **bh, int b, int offset, int length,
     struct squashfs_page_actor *output)
 {
-    void *buf = NULL;
     int zlib_err, zlib_init = 0, k = 0;
     z_stream *stream = strm;
 
@@ -85,19 +84,10 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
         if (stream->avail_out == 0) {
             stream->next_out = squashfs_next_page(output);
-            if (!IS_ERR(stream->next_out))
+            if (stream->next_out != NULL)
                 stream->avail_out = PAGE_CACHE_SIZE;
         }
 
-        if (!stream->next_out) {
-            if (!buf) {
-                buf = kmalloc(PAGE_CACHE_SIZE, GFP_ATOMIC);
-                if (!buf)
-                    goto out;
-            }
-            stream->next_out = buf;
-        }
-
         if (!zlib_init) {
             zlib_err = zlib_inflateInit(stream);
             if (zlib_err != Z_OK) {
@@ -125,13 +115,11 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
     if (k < b)
         goto out;
 
-    kfree(buf);
     return stream->total_out;
 
 out:
     for (; k < b; k++)
         put_bh(bh[k]);
 
-    kfree(buf);
     return -EIO;
 }

diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c

@@ -389,7 +389,7 @@ static int __init init_timer_list_procfs(void)
 {
     struct proc_dir_entry *pe;
 
-    pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
+    pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
     if (!pe)
         return -ENOMEM;
     return 0;

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c

@@ -4767,6 +4767,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
     if (ops->flags & FTRACE_OPS_FL_ENABLED)
         ftrace_shutdown(ops, 0);
     ops->flags |= FTRACE_OPS_FL_DELETED;
+    ftrace_free_filter(ops);
     mutex_unlock(&ftrace_lock);
 }

diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c

@@ -727,8 +727,10 @@ static int set_trigger_filter(char *filter_str,
 
     /* The filter is for the 'trigger' event, not the triggered event */
     ret = create_event_filter(file->event_call, filter_str, false, &filter);
-    if (ret)
-        goto out;
+    /*
+     * If create_event_filter() fails, filter still needs to be freed.
+     * Which the calling code will do with data->filter.
+     */
 
 assign:
     tmp = rcu_access_pointer(data->filter);

diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c

@@ -1,27 +1,38 @@
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/interval_tree.h>
 #include <linux/random.h>
+#include <linux/slab.h>
 #include <asm/timex.h>
 
-#define NODES        100
-#define PERF_LOOPS   100000
-#define SEARCHES     100
-#define SEARCH_LOOPS 10000
+#define __param(type, name, init, msg)		\
+	static type name = init;		\
+	module_param(name, type, 0444);		\
+	MODULE_PARM_DESC(name, msg);
+
+__param(int, nnodes, 100, "Number of nodes in the interval tree");
+__param(int, perf_loops, 1000, "Number of iterations modifying the tree");
+
+__param(int, nsearches, 100, "Number of searches to the interval tree");
+__param(int, search_loops, 1000, "Number of iterations searching the tree");
+__param(bool, search_all, false, "Searches will iterate all nodes in the tree");
+
+__param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
 
 static struct rb_root root = RB_ROOT;
-static struct interval_tree_node nodes[NODES];
-static u32 queries[SEARCHES];
+static struct interval_tree_node *nodes = NULL;
+static u32 *queries = NULL;
 
 static struct rnd_state rnd;
 
 static inline unsigned long
-search(unsigned long query, struct rb_root *root)
+search(struct rb_root *root, unsigned long start, unsigned long last)
 {
     struct interval_tree_node *node;
     unsigned long results = 0;
 
-    for (node = interval_tree_iter_first(root, query, query); node;
-         node = interval_tree_iter_next(node, query, query))
+    for (node = interval_tree_iter_first(root, start, last); node;
+         node = interval_tree_iter_next(node, start, last))
         results++;
     return results;
 }
@@ -29,19 +40,22 @@ search(unsigned long query, struct rb_root *root)
 static void init(void)
 {
     int i;
-    for (i = 0; i < NODES; i++) {
-        u32 a = prandom_u32_state(&rnd);
-        u32 b = prandom_u32_state(&rnd);
-        if (a <= b) {
-            nodes[i].start = a;
-            nodes[i].last = b;
-        } else {
-            nodes[i].start = b;
-            nodes[i].last = a;
-        }
+
+    for (i = 0; i < nnodes; i++) {
+        u32 b = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
+        u32 a = (prandom_u32_state(&rnd) >> 4) % b;
+
+        nodes[i].start = a;
+        nodes[i].last = b;
     }
-    for (i = 0; i < SEARCHES; i++)
-        queries[i] = prandom_u32_state(&rnd);
+
+    /*
+     * Limit the search scope to what the user defined.
+     * Otherwise we are merely measuring empty walks,
+     * which is pointless.
+     */
+    for (i = 0; i < nsearches; i++)
+        queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
 }
@@ -50,6 +64,16 @@ static int interval_tree_test_init(void)
     unsigned long results;
     cycles_t time1, time2, time;
 
+    nodes = kmalloc(nnodes * sizeof(struct interval_tree_node), GFP_KERNEL);
+    if (!nodes)
+        return -ENOMEM;
+
+    queries = kmalloc(nsearches * sizeof(int), GFP_KERNEL);
+    if (!queries) {
+        kfree(nodes);
+        return -ENOMEM;
+    }
+
     printk(KERN_ALERT "interval tree insert/remove");
 
     prandom_seed_state(&rnd, 3141592653589793238ULL);
@@ -57,39 +81,46 @@ static int interval_tree_test_init(void)
     time1 = get_cycles();
 
-    for (i = 0; i < PERF_LOOPS; i++) {
-        for (j = 0; j < NODES; j++)
+    for (i = 0; i < perf_loops; i++) {
+        for (j = 0; j < nnodes; j++)
             interval_tree_insert(nodes + j, &root);
-        for (j = 0; j < NODES; j++)
+        for (j = 0; j < nnodes; j++)
             interval_tree_remove(nodes + j, &root);
     }
 
     time2 = get_cycles();
     time = time2 - time1;
 
-    time = div_u64(time, PERF_LOOPS);
+    time = div_u64(time, perf_loops);
     printk(" -> %llu cycles\n", (unsigned long long)time);
 
     printk(KERN_ALERT "interval tree search");
 
-    for (j = 0; j < NODES; j++)
+    for (j = 0; j < nnodes; j++)
         interval_tree_insert(nodes + j, &root);
 
     time1 = get_cycles();
 
     results = 0;
-    for (i = 0; i < SEARCH_LOOPS; i++)
-        for (j = 0; j < SEARCHES; j++)
-            results += search(queries[j], &root);
+    for (i = 0; i < search_loops; i++)
+        for (j = 0; j < nsearches; j++) {
+            unsigned long start = search_all ? 0 : queries[j];
+            unsigned long last = search_all ? max_endpoint : queries[j];
+
+            results += search(&root, start, last);
+        }
 
     time2 = get_cycles();
     time = time2 - time1;
 
-    time = div_u64(time, SEARCH_LOOPS);
-    results = div_u64(results, SEARCH_LOOPS);
+    time = div_u64(time, search_loops);
+    results = div_u64(results, search_loops);
     printk(" -> %llu cycles (%lu results)\n",
            (unsigned long long)time, results);
 
+    kfree(queries);
+    kfree(nodes);
+
     return -EAGAIN; /* Fail will directly unload the module */
 }

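The __param() macro above is only shorthand for the usual module-parameter triple; for example, its first invocation expands to:

static int nnodes = 100;
module_param(nnodes, int, 0444);    /* world-readable under /sys/module */
MODULE_PARM_DESC(nnodes, "Number of nodes in the interval tree");

So, assuming the test is built as the interval_tree_test module, a run can presumably be sized from the command line, e.g. modprobe interval_tree_test nnodes=1000 perf_loops=100.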
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c

@@ -1,11 +1,18 @@
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/rbtree_augmented.h>
 #include <linux/random.h>
+#include <linux/slab.h>
 #include <asm/timex.h>
 
-#define NODES       100
-#define PERF_LOOPS  100000
-#define CHECK_LOOPS 100
+#define __param(type, name, init, msg)		\
+	static type name = init;		\
+	module_param(name, type, 0444);		\
+	MODULE_PARM_DESC(name, msg);
+
+__param(int, nnodes, 100, "Number of nodes in the rb-tree");
+__param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
+__param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
 
 struct test_node {
     u32 key;
@@ -17,7 +24,7 @@ struct test_node {
 };
 
 static struct rb_root root = RB_ROOT;
-static struct test_node nodes[NODES];
+static struct test_node *nodes = NULL;
 
 static struct rnd_state rnd;
@@ -95,7 +102,7 @@ static void erase_augmented(struct test_node *node, struct rb_root *root)
 static void init(void)
 {
     int i;
-    for (i = 0; i < NODES; i++) {
+    for (i = 0; i < nnodes; i++) {
         nodes[i].key = prandom_u32_state(&rnd);
         nodes[i].val = prandom_u32_state(&rnd);
     }
@@ -177,6 +184,10 @@ static int __init rbtree_test_init(void)
     int i, j;
     cycles_t time1, time2, time;
 
+    nodes = kmalloc(nnodes * sizeof(*nodes), GFP_KERNEL);
+    if (!nodes)
+        return -ENOMEM;
+
     printk(KERN_ALERT "rbtree testing");
 
     prandom_seed_state(&rnd, 3141592653589793238ULL);
@@ -184,27 +195,27 @@ static int __init rbtree_test_init(void)
     time1 = get_cycles();
 
-    for (i = 0; i < PERF_LOOPS; i++) {
-        for (j = 0; j < NODES; j++)
+    for (i = 0; i < perf_loops; i++) {
+        for (j = 0; j < nnodes; j++)
             insert(nodes + j, &root);
-        for (j = 0; j < NODES; j++)
+        for (j = 0; j < nnodes; j++)
             erase(nodes + j, &root);
     }
 
     time2 = get_cycles();
     time = time2 - time1;
 
-    time = div_u64(time, PERF_LOOPS);
+    time = div_u64(time, perf_loops);
     printk(" -> %llu cycles\n", (unsigned long long)time);
 
-    for (i = 0; i < CHECK_LOOPS; i++) {
+    for (i = 0; i < check_loops; i++) {
         init();
-        for (j = 0; j < NODES; j++) {
+        for (j = 0; j < nnodes; j++) {
             check(j);
             insert(nodes + j, &root);
         }
-        for (j = 0; j < NODES; j++) {
-            check(NODES - j);
+        for (j = 0; j < nnodes; j++) {
+            check(nnodes - j);
             erase(nodes + j, &root);
         }
         check(0);
@@ -216,32 +227,34 @@ static int __init rbtree_test_init(void)
     time1 = get_cycles();
 
-    for (i = 0; i < PERF_LOOPS; i++) {
-        for (j = 0; j < NODES; j++)
+    for (i = 0; i < perf_loops; i++) {
+        for (j = 0; j < nnodes; j++)
             insert_augmented(nodes + j, &root);
-        for (j = 0; j < NODES; j++)
+        for (j = 0; j < nnodes; j++)
             erase_augmented(nodes + j, &root);
     }
 
     time2 = get_cycles();
     time = time2 - time1;
 
-    time = div_u64(time, PERF_LOOPS);
+    time = div_u64(time, perf_loops);
     printk(" -> %llu cycles\n", (unsigned long long)time);
 
-    for (i = 0; i < CHECK_LOOPS; i++) {
+    for (i = 0; i < check_loops; i++) {
         init();
-        for (j = 0; j < NODES; j++) {
+        for (j = 0; j < nnodes; j++) {
             check_augmented(j);
             insert_augmented(nodes + j, &root);
         }
-        for (j = 0; j < NODES; j++) {
-            check_augmented(NODES - j);
+        for (j = 0; j < nnodes; j++) {
+            check_augmented(nnodes - j);
             erase_augmented(nodes + j, &root);
         }
         check_augmented(0);
     }
 
+    kfree(nodes);
+
     return -EAGAIN; /* Fail will directly unload the module */
 }

diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c

@@ -1886,7 +1886,8 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
         params[ac].acm = acm;
         params[ac].uapsd = uapsd;
 
-        if (params[ac].cw_min > params[ac].cw_max) {
+        if (params[ac].cw_min == 0 ||
+            params[ac].cw_min > params[ac].cw_max) {
             sdata_info(sdata,
                    "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n",
                    params[ac].cw_min, params[ac].cw_max, aci);

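For context on why cw_min can be zero here at all: mlme.c derives the contention-window bounds from the WMM IE's 4-bit exponent fields with a helper of this shape (ecw2cw() as in mainline mlme.c):

static u32 ecw2cw(u32 ecw)
{
    return (1 << ecw) - 1;    /* ECW nibble -> contention window */
}

An ECWmin nibble of 0 therefore decodes to cw_min == 0, which the widened condition now treats as invalid alongside cw_min > cw_max, so buggy APs fall back to the default parameters instead of tripping a WARN later.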
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c

@@ -758,8 +758,15 @@ void xprt_connect(struct rpc_task *task)
             return;
         if (xprt_test_and_set_connecting(xprt))
             return;
-        xprt->stat.connect_start = jiffies;
-        xprt->ops->connect(xprt, task);
+        /* Race breaker */
+        if (!xprt_connected(xprt)) {
+            xprt->stat.connect_start = jiffies;
+            xprt->ops->connect(xprt, task);
+        } else {
+            xprt_clear_connecting(xprt);
+            task->tk_status = 0;
+            rpc_wake_up_queued_task(&xprt->pending, task);
+        }
     }
     xprt_release_write(xprt, task);
 }

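The added branch is the classic flag-then-recheck shape: after winning the "connecting" flag, re-test the condition that may have changed while racing, and if the race was lost after all, undo the flag and wake any queued waiters instead of connecting again. A compact userspace model with C11 atomics (all names invented, not the SUNRPC API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdbool.h>

static atomic_bool connecting, connected;

static void connect_once(int task)
{
    if (atomic_load(&connected))
        return;
    if (atomic_exchange(&connecting, true))
        return;            /* another task owns the connect */
    if (!atomic_load(&connected)) {    /* race breaker, as in the patch */
        printf("task %d performs the connect\n", task);
        atomic_store(&connected, true);
        atomic_store(&connecting, false);
    } else {
        /* lost the race between the two checks: clear the flag and
         * wake waiters, mirroring xprt_clear_connecting() above */
        atomic_store(&connecting, false);
        printf("task %d skips the redundant connect\n", task);
    }
}

int main(void)
{
    connect_once(1);    /* connects */
    connect_once(2);    /* sees connected, returns early */
    return 0;
}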
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c

@@ -785,6 +785,9 @@ wavefront_send_patch (snd_wavefront_t *dev, wavefront_patch_info *header)
     DPRINT (WF_DEBUG_LOAD_PATCH, "downloading patch %d\n",
                       header->number);
 
+    if (header->number >= ARRAY_SIZE(dev->patch_status))
+        return -EINVAL;
+
     dev->patch_status[header->number] |= WF_SLOT_FILLED;
 
     bptr = buf;
@@ -809,6 +812,9 @@ wavefront_send_program (snd_wavefront_t *dev, wavefront_patch_info *header)
     DPRINT (WF_DEBUG_LOAD_PATCH, "downloading program %d\n",
                       header->number);
 
+    if (header->number >= ARRAY_SIZE(dev->prog_status))
+        return -EINVAL;
+
     dev->prog_status[header->number] = WF_SLOT_USED;
 
     /* XXX need to zero existing SLOT_USED bit for program_status[i]
@@ -898,6 +904,9 @@ wavefront_send_sample (snd_wavefront_t *dev,
         header->number = x;
     }
 
+    if (header->number >= WF_MAX_SAMPLE)
+        return -EINVAL;
+
     if (header->size) {
 
         /* XXX it's a debatable point whether or not RDONLY semantics