Merge tag 'v4.4.17' into linux-linaro-lsk-v4.4

This is the 4.4.17 stable release

commit 3d27bcb804
66 changed files with 596 additions and 256 deletions

Makefile | 2 +-

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 16
+SUBLEVEL = 17
 EXTRAVERSION =
 NAME = Blurry Fish Butt

@@ -48,8 +48,6 @@ endif
 endif
 
 cflags-$(CONFIG_ARC_DW2_UNWIND)	+= -fasynchronous-unwind-tables
 
 # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
-ifeq ($(atleast_gcc48),y)
 cflags-$(CONFIG_ARC_DW2_UNWIND)	+= -gdwarf-2
-endif

@@ -142,7 +142,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
 	 * prelogue is setup (callee regs saved and then fp set and not other
 	 * way around
 	 */
-	pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
 	return 0;
 
 #endif

@@ -76,6 +76,8 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
 	u8 ret_flags;
 
 	version = src->version;
+	/* Make the latest version visible */
+	smp_rmb();
 
 	offset = pvclock_get_nsec_offset(src);
 	ret = src->system_time + offset;

@@ -1110,6 +1110,13 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
 	void *at;
 	u64 pebs_status;
 
+	/*
+	 * fmt0 does not have a status bitfield (does not use
+	 * perf_record_nhm format)
+	 */
+	if (x86_pmu.intel_cap.pebs_format < 1)
+		return base;
+
 	if (base == NULL)
 		return NULL;
 
@@ -1195,7 +1202,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 	if (!event->attr.precise_ip)
 		return;
 
-	n = (top - at) / x86_pmu.pebs_record_size;
+	n = top - at;
 	if (n <= 0)
 		return;

@@ -11,7 +11,11 @@
 
 #include <linux/pci.h>
 #include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/pci_ids.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
 #include <drm/i915_drm.h>
 #include <asm/pci-direct.h>
 #include <asm/dma.h>
@@ -21,6 +25,9 @@
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/irq_remapping.h>
+#include <asm/early_ioremap.h>
+
+#define dev_err(msg)  pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg)
 
 static void __init fix_hypertransport_config(int num, int slot, int func)
 {
@@ -75,6 +82,13 @@ static void __init nvidia_bugs(int num, int slot, int func)
 {
 #ifdef CONFIG_ACPI
 #ifdef CONFIG_X86_IO_APIC
+	/*
+	 * Only applies to Nvidia root ports (bus 0) and not to
+	 * Nvidia graphics cards with PCI ports on secondary buses.
+	 */
+	if (num)
+		return;
+
 	/*
 	 * All timer overrides on Nvidia are
 	 * wrong unless HPET is enabled.
@@ -589,6 +603,61 @@ static void __init force_disable_hpet(int num, int slot, int func)
 #endif
 }
 
+#define BCM4331_MMIO_SIZE	16384
+#define BCM4331_PM_CAP		0x40
+#define bcma_aread32(reg)	ioread32(mmio + 1 * BCMA_CORE_SIZE + reg)
+#define bcma_awrite32(reg, val)	iowrite32(val, mmio + 1 * BCMA_CORE_SIZE + reg)
+
+static void __init apple_airport_reset(int bus, int slot, int func)
+{
+	void __iomem *mmio;
+	u16 pmcsr;
+	u64 addr;
+	int i;
+
+	if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc."))
+		return;
+
+	/* Card may have been put into PCI_D3hot by grub quirk */
+	pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+
+	if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+		write_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL, pmcsr);
+		mdelay(10);
+
+		pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+			dev_err("Cannot power up Apple AirPort card\n");
+			return;
+		}
+	}
+
+	addr  =      read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
+	addr |= (u64)read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1) << 32;
+	addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+	mmio = early_ioremap(addr, BCM4331_MMIO_SIZE);
+	if (!mmio) {
+		dev_err("Cannot iomap Apple AirPort card\n");
+		return;
+	}
+
+	pr_info("Resetting Apple AirPort card (left enabled by EFI)\n");
+
+	for (i = 0; bcma_aread32(BCMA_RESET_ST) && i < 30; i++)
+		udelay(10);
+
+	bcma_awrite32(BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+	bcma_aread32(BCMA_RESET_CTL);
+	udelay(1);
+
+	bcma_awrite32(BCMA_RESET_CTL, 0);
+	bcma_aread32(BCMA_RESET_CTL);
+	udelay(10);
+
+	early_iounmap(mmio, BCM4331_MMIO_SIZE);
+}
+
 #define QFLAG_APPLY_ONCE	0x1
 #define QFLAG_APPLIED		0x2
@@ -602,12 +671,6 @@ struct chipset {
 	void (*f)(int num, int slot, int func);
 };
 
-/*
- * Only works for devices on the root bus. If you add any devices
- * not on bus 0 readd another loop level in early_quirks(). But
- * be careful because at least the Nvidia quirk here relies on
- * only matching on bus 0.
- */
 static struct chipset early_qrk[] __initdata = {
 	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
 	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
@@ -637,9 +700,13 @@ static struct chipset early_qrk[] __initdata = {
 	 */
 	{ PCI_VENDOR_ID_INTEL, 0x0f00,
		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+	{ PCI_VENDOR_ID_BROADCOM, 0x4331,
+	  PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
 	{}
 };
 
+static void __init early_pci_scan_bus(int bus);
+
 /**
  * check_dev_quirk - apply early quirks to a given PCI device
  * @num: bus number
@@ -648,7 +715,7 @@ static struct chipset early_qrk[] __initdata = {
  *
  * Check the vendor & device ID against the early quirks table.
  *
- * If the device is single function, let early_quirks() know so we don't
+ * If the device is single function, let early_pci_scan_bus() know so we don't
  * poke at this device again.
  */
 static int __init check_dev_quirk(int num, int slot, int func)
@@ -657,6 +724,7 @@ static int __init check_dev_quirk(int num, int slot, int func)
 	u16 vendor;
 	u16 device;
 	u8 type;
+	u8 sec;
 	int i;
 
 	class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
@@ -684,25 +752,36 @@ static int __init check_dev_quirk(int num, int slot, int func)
 
 	type = read_pci_config_byte(num, slot, func,
				    PCI_HEADER_TYPE);
+
+	if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
+		sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS);
+		if (sec > num)
+			early_pci_scan_bus(sec);
+	}
+
 	if (!(type & 0x80))
 		return -1;
 
 	return 0;
 }
 
-void __init early_quirks(void)
+static void __init early_pci_scan_bus(int bus)
 {
 	int slot, func;
 
-	if (!early_pci_allowed())
-		return;
-
-	/* Poor man's PCI discovery */
+	/* Only scan the root bus */
 	for (slot = 0; slot < 32; slot++)
 		for (func = 0; func < 8; func++) {
 			/* Only probe function 0 on single fn devices */
-			if (check_dev_quirk(0, slot, func))
+			if (check_dev_quirk(bus, slot, func))
 				break;
 		}
 }
+
+void __init early_quirks(void)
+{
+	if (!early_pci_allowed())
+		return;
+
+	early_pci_scan_bus(0);
+}

@@ -66,6 +66,8 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 
 	do {
 		version = __pvclock_read_cycles(src, &ret, &flags);
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	return flags & valid_flags;
@@ -80,6 +82,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 
 	do {
 		version = __pvclock_read_cycles(src, &ret, &flags);
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {

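Note: the read pattern these two pvclock hunks complete is the classic seqcount-style lockless read: sample the version, read the payload, and re-check the version last, with a barrier keeping the re-check ordered after the reads. A minimal standalone sketch in userspace C (illustration only, not the kernel code; names are hypothetical):

	#include <stdint.h>

	struct time_info {
		volatile uint32_t version;   /* odd while a writer is mid-update */
		uint64_t system_time;
	};

	/* Read system_time consistently against a concurrent writer. */
	static uint64_t read_consistent(struct time_info *src)
	{
		uint32_t version;
		uint64_t val;

		do {
			version = src->version;
			__atomic_thread_fence(__ATOMIC_ACQUIRE); /* first smp_rmb() */
			val = src->system_time;
			/* Make sure that the version double-check is last. */
			__atomic_thread_fence(__ATOMIC_ACQUIRE); /* the added smp_rmb() */
		} while ((version & 1) || version != src->version);

		return val;
	}
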
@@ -150,8 +150,10 @@ static int get_task_ioprio(struct task_struct *p)
 	if (ret)
 		goto out;
 	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+	task_lock(p);
 	if (p->io_context)
 		ret = p->io_context->ioprio;
+	task_unlock(p);
 out:
 	return ret;
 }

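Note: the fix works because the exit path clears p->io_context under the same task lock, so the check and the dereference can no longer race with the context being freed. A minimal sketch of the pattern in plain C with pthreads (hypothetical names, not the kernel code):

	#include <pthread.h>

	struct io_context { int ioprio; };

	struct task {
		pthread_mutex_t alloc_lock;      /* task_lock(p) in the kernel */
		struct io_context *io_context;   /* freed once the task exits */
	};

	/* Read a field through a pointer that another thread may free;
	 * the lock orders this read against the exit path that clears it. */
	static int get_ioprio(struct task *p, int fallback)
	{
		int ret = fallback;

		pthread_mutex_lock(&p->alloc_lock);
		if (p->io_context)
			ret = p->io_context->ioprio;
		pthread_mutex_unlock(&p->alloc_lock);
		return ret;
	}
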
@@ -4138,6 +4138,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	 */
 	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
 
+	/*
+	 * Device times out with higher max sects.
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+	 */
+	{ "LITEON CX1-JB256-HP", NULL,		ATA_HORKAGE_MAX_SEC_1024 },
+
 	/* Devices we expect to fail diagnostics */
 
 	/* Devices where NCQ should be avoided */

@@ -8,8 +8,6 @@
 #include <linux/bcma/bcma.h>
 #include <linux/delay.h>
 
-#define BCMA_CORE_SIZE		0x1000
-
 #define bcma_err(bus, fmt, ...) \
	pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
 #define bcma_warn(bus, fmt, ...) \

@@ -153,6 +153,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
 		return NULL;
 
 	init.name = name;
+	init.flags = 0;
 	init.num_parents = num_parents;
 	init.parent_names = parent_names;
 	init.ops = &rockchip_mmc_clk_ops;

@@ -242,7 +242,7 @@ struct at_xdmac_lld {
 	u32 mbr_dus;	/* Destination Microblock Stride Register */
 };
 
-
+/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
 struct at_xdmac_desc {
 	struct at_xdmac_lld		lld;
 	enum dma_transfer_direction	direction;
@@ -253,7 +253,7 @@ struct at_xdmac_desc {
 	unsigned int			xfer_size;
 	struct list_head		descs_list;
 	struct list_head		xfer_node;
-};
+} __aligned(sizeof(u64));
 
 static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
 {
@@ -1388,6 +1388,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	u32			cur_nda, check_nda, cur_ubc, mask, value;
 	u8			dwidth = 0;
 	unsigned long		flags;
+	bool			initd;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
@@ -1412,7 +1413,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	residue = desc->xfer_size;
 	/*
 	 * Flush FIFO: only relevant when the transfer is source peripheral
-	 * synchronized.
+	 * synchronized. Flush is needed before reading CUBC because data in
+	 * the FIFO are not reported by CUBC. Reporting a residue of the
+	 * transfer length while we have data in FIFO can cause issue.
+	 * Usecase: atmel USART has a timeout which means I have received
+	 * characters but there is no more character received for a while. On
+	 * timeout, it requests the residue. If the data are in the DMA FIFO,
+	 * we will return a residue of the transfer length. It means no data
+	 * received. If an application is waiting for these data, it will hang
+	 * since we won't have another USART timeout without receiving new
+	 * data.
 	 */
 	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
 	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
@@ -1423,34 +1433,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	}
 
 	/*
-	 * When processing the residue, we need to read two registers but we
-	 * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
-	 * we stand in the descriptor list and AT_XDMAC_CUBC is used
-	 * to know how many data are remaining for the current descriptor.
-	 * Since the dma channel is not paused to not loose data, between the
-	 * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
-	 * descriptor.
-	 * For that reason, after reading AT_XDMAC_CUBC, we check if we are
-	 * still using the same descriptor by reading a second time
-	 * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
-	 * read again AT_XDMAC_CUBC.
+	 * The easiest way to compute the residue should be to pause the DMA
+	 * but doing this can lead to miss some data as some devices don't
+	 * have FIFO.
+	 * We need to read several registers because:
+	 *  - DMA is running therefore a descriptor change is possible while
+	 *    reading these registers
+	 *  - When the block transfer is done, the value of the CUBC register
+	 *    is set to its initial value until the fetch of the next descriptor.
+	 *    This value will corrupt the residue calculation so we have to skip
+	 *    it.
+	 *
+	 *  INITD --------                    ------------
+	 *              |____________________|
+	 *       _______________________  _______________
+	 *  NDA       @desc2             \/   @desc3
+	 *       _______________________/\_______________
+	 *       __________  ___________  _______________
+	 *  CUBC       0    \/ MAX desc1 \/  MAX desc2
+	 *       __________/\___________/\_______________
+	 *
+	 * Since descriptors are aligned on 64 bits, we can assume that
+	 * the update of NDA and CUBC is atomic.
 	 * Memory barriers are used to ensure the read order of the registers.
-	 * A max number of retries is set because unlikely it can never ends if
-	 * we are transferring a lot of data with small buffers.
+	 * A max number of retries is set because unlikely it could never ends.
 	 */
-	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-	rmb();
-	cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
 	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
-		rmb();
 		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-
-		if (likely(cur_nda == check_nda))
-			break;
-
-		cur_nda = check_nda;
 		rmb();
+		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+		rmb();
 		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+		rmb();
+		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+		rmb();
+
+		if ((check_nda == cur_nda) && initd)
+			break;
 	}
 
@@ -1458,6 +1477,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		goto spin_unlock;
 	}
 
+	/*
+	 * Flush FIFO: only relevant when the transfer is source peripheral
+	 * synchronized. Another flush is needed here because CUBC is updated
+	 * when the controller sends the data write command. It can lead to
+	 * report data that are not written in the memory or the device. The
+	 * FIFO flush ensures that data are really written.
+	 */
+	if ((desc->lld.mbr_cfg & mask) == value) {
+		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+			cpu_relax();
+	}
+
 	/*
 	 * Remove size of all microblocks already transferred and the current
 	 * one. Then add the remaining size to transfer of the current

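Note: the residue fix above is an instance of a general pattern: taking a consistent snapshot of several hardware registers while the device keeps running, by bracketing the reads with a re-read of the anchor register and retrying. A minimal sketch (hypothetical accessors, not the driver code; the driver additionally places a read barrier between each access):

	#include <stdbool.h>
	#include <stdint.h>

	#define MAX_RETRIES 5

	/* Hypothetical MMIO accessors standing in for at_xdmac_chan_read(). */
	extern uint32_t read_nda(void);
	extern uint32_t read_cubc(void);
	extern bool read_initd(void);

	/* Take a consistent (nda, cubc) pair while the DMA keeps running:
	 * re-read NDA around the CUBC read and retry until nothing moved
	 * and the next-descriptor fetch is complete (INITD set). */
	static int snapshot(uint32_t *nda, uint32_t *cubc)
	{
		for (int retry = 0; retry < MAX_RETRIES; retry++) {
			uint32_t check = read_nda();
			bool initd = read_initd();
			uint32_t ubc = read_cubc();
			uint32_t cur = read_nda();

			if (check == cur && initd) {
				*nda = cur;
				*cubc = ubc;
				return 0;
			}
		}
		return -1;	/* could not get a stable view */
	}
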
@@ -419,6 +419,38 @@ static struct intel_th_subdevice {
 	},
 };
 
+#ifdef CONFIG_MODULES
+static void __intel_th_request_hub_module(struct work_struct *work)
+{
+	struct intel_th *th = container_of(work, struct intel_th,
+					   request_module_work);
+
+	request_module("intel_th_%s", th->hub->name);
+}
+
+static int intel_th_request_hub_module(struct intel_th *th)
+{
+	INIT_WORK(&th->request_module_work, __intel_th_request_hub_module);
+	schedule_work(&th->request_module_work);
+
+	return 0;
+}
+
+static void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+	flush_work(&th->request_module_work);
+}
+#else
+static inline int intel_th_request_hub_module(struct intel_th *th)
+{
+	return -EINVAL;
+}
+
+static inline void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+}
+#endif /* CONFIG_MODULES */
+
 static int intel_th_populate(struct intel_th *th, struct resource *devres,
			     unsigned int ndevres, int irq)
 {
@@ -488,7 +520,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
 		/* need switch driver to be loaded to enumerate the rest */
 		if (subdev->type == INTEL_TH_SWITCH && !req) {
 			th->hub = thdev;
-			err = request_module("intel_th_%s", subdev->name);
+			err = intel_th_request_hub_module(th);
 			if (!err)
 				req++;
 		}
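Note: the change defers the blocking request_module() call to a workqueue and flushes it on teardown so the work item never outlives the object it points into. A standalone sketch of that deferred-work pattern as a hypothetical kernel module (assumed names, not the driver code):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	struct demo {
		struct work_struct work;
	};

	static struct demo demo;

	static void demo_work_fn(struct work_struct *work)
	{
		struct demo *d = container_of(work, struct demo, work);

		/* slow or sleeping action runs here, in process context */
		pr_info("deferred action for %p\n", d);
	}

	static int __init demo_init(void)
	{
		INIT_WORK(&demo.work, demo_work_fn);
		schedule_work(&demo.work);	/* returns immediately */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		flush_work(&demo.work);		/* as intel_th_request_hub_module_flush() */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
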
@@ -603,6 +635,7 @@ void intel_th_free(struct intel_th *th)
 {
 	int i;
 
+	intel_th_request_hub_module_flush(th);
 	for (i = 0; i < TH_SUBDEVICE_MAX; i++)
 		if (th->thdev[i] != th->hub)
 			intel_th_device_remove(th->thdev[i]);

@@ -197,6 +197,9 @@ struct intel_th {
 
 	int			id;
 	int			major;
+#ifdef CONFIG_MODULES
+	struct work_struct	request_module_work;
+#endif /* CONFIG_MODULES */
 #ifdef CONFIG_INTEL_TH_DEBUG
 	struct dentry		*dbg;
 #endif

@@ -67,6 +67,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126),
 		.driver_data = (kernel_ulong_t)0,
 	},
+	{
+		/* Kaby Lake PCH-H */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
+		.driver_data = (kernel_ulong_t)0,
+	},
 	{ 0 },
 };
 

@@ -150,7 +150,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
 		mux->data.idle_in_use = true;
 
 	/* map address from "reg" if exists */
-	if (of_address_to_resource(np, 0, &res)) {
+	if (of_address_to_resource(np, 0, &res) == 0) {
 		mux->data.reg_size = resource_size(&res);
 		mux->data.reg = devm_ioremap_resource(&pdev->dev, &res);
 		if (IS_ERR(mux->data.reg))

@@ -1200,22 +1200,15 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
 	int ep_irq_in_idx;
 	int i, error;
 
+	if (intf->cur_altsetting->desc.bNumEndpoints != 2)
+		return -ENODEV;
+
 	for (i = 0; xpad_device[i].idVendor; i++) {
 		if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
		    (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
 			break;
 	}
 
-	if (xpad_device[i].xtype == XTYPE_XBOXONE &&
-	    intf->cur_altsetting->desc.bInterfaceNumber != 0) {
-		/*
-		 * The Xbox One controller lists three interfaces all with the
-		 * same interface class, subclass and protocol. Differentiate by
-		 * interface number.
-		 */
-		return -ENODEV;
-	}
-
 	xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
 	if (!xpad)
 		return -ENOMEM;
@@ -1246,6 +1239,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
 	if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
 		if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
 			xpad->xtype = XTYPE_XBOX360W;
+		else if (intf->cur_altsetting->desc.bInterfaceProtocol == 208)
+			xpad->xtype = XTYPE_XBOXONE;
 		else
 			xpad->xtype = XTYPE_XBOX360;
 	} else {
@@ -1260,6 +1255,17 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
 			xpad->mapping |= MAP_STICKS_TO_NULL;
 	}
 
+	if (xpad->xtype == XTYPE_XBOXONE &&
+	    intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+		/*
+		 * The Xbox One controller lists three interfaces all with the
+		 * same interface class, subclass and protocol. Differentiate by
+		 * interface number.
+		 */
+		error = -ENODEV;
+		goto err_free_in_urb;
+	}
+
 	error = xpad_init_output(intf, xpad);
 	if (error)
 		goto err_free_in_urb;

@@ -1568,13 +1568,7 @@ static int elantech_set_properties(struct elantech_data *etd)
 		case 5:
 			etd->hw_version = 3;
 			break;
-		case 6:
-		case 7:
-		case 8:
-		case 9:
-		case 10:
-		case 13:
-		case 14:
+		case 6 ... 14:
 			etd->hw_version = 4;
 			break;
 		default:

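Note: "case 6 ... 14:" is the GCC/Clang case-range extension; it matches every value in the inclusive range, which is why the collapse above also makes hardware versions 11 and 12 take the hw_version = 4 path rather than falling through to the default. Minimal illustration (hypothetical helper, compiles with GCC):

	/* Map an IC body type to a hardware version, GNU case-range style. */
	static int hw_version_for(int body_type)
	{
		switch (body_type) {
		case 6 ... 14:		/* inclusive range, GNU extension */
			return 4;
		default:
			return -1;
		}
	}
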
@@ -355,18 +355,11 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
 		return -ENXIO;
 	}
 
-	if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
-		psmouse_dbg(psmouse, "VMMouse port in use.\n");
-		return -EBUSY;
-	}
-
 	/* Check if the device is present */
 	response = ~VMMOUSE_PROTO_MAGIC;
 	VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
-	if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) {
-		release_region(VMMOUSE_PROTO_PORT, 4);
+	if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU)
 		return -ENXIO;
-	}
 
 	if (set_properties) {
 		psmouse->vendor = VMMOUSE_VENDOR;
@@ -374,8 +367,6 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
 		psmouse->model = version;
 	}
 
-	release_region(VMMOUSE_PROTO_PORT, 4);
-
 	return 0;
 }
 
@@ -394,7 +385,6 @@ static void vmmouse_disconnect(struct psmouse *psmouse)
 	psmouse_reset(psmouse);
 	input_unregister_device(priv->abs_dev);
 	kfree(priv);
-	release_region(VMMOUSE_PROTO_PORT, 4);
 }
 
 /**
@@ -438,15 +428,10 @@ int vmmouse_init(struct psmouse *psmouse)
 	struct input_dev *rel_dev = psmouse->dev, *abs_dev;
 	int error;
 
-	if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
-		psmouse_dbg(psmouse, "VMMouse port in use.\n");
-		return -EBUSY;
-	}
-
 	psmouse_reset(psmouse);
 	error = vmmouse_enable(psmouse);
 	if (error)
-		goto release_region;
+		return error;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	abs_dev = input_allocate_device();
@@ -502,8 +487,5 @@ init_fail:
 	kfree(priv);
 	psmouse->private = NULL;
 
-release_region:
-	release_region(VMMOUSE_PROTO_PORT, 4);
-
 	return error;
 }

@@ -22,6 +22,11 @@
 #include <linux/regmap.h>
 #include "tsc200x-core.h"
 
+static const struct input_id tsc2004_input_id = {
+	.bustype = BUS_I2C,
+	.product = 2004,
+};
+
 static int tsc2004_cmd(struct device *dev, u8 cmd)
 {
 	u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -42,7 +47,7 @@ static int tsc2004_probe(struct i2c_client *i2c,
			 const struct i2c_device_id *id)
 
 {
-	return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C,
+	return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id,
			     devm_regmap_init_i2c(i2c, &tsc200x_regmap_config),
			     tsc2004_cmd);
 }

@@ -24,6 +24,11 @@
 #include <linux/regmap.h>
 #include "tsc200x-core.h"
 
+static const struct input_id tsc2005_input_id = {
+	.bustype = BUS_SPI,
+	.product = 2005,
+};
+
 static int tsc2005_cmd(struct device *dev, u8 cmd)
 {
 	u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -62,7 +67,7 @@ static int tsc2005_probe(struct spi_device *spi)
 	if (error)
 		return error;
 
-	return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI,
+	return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id,
			     devm_regmap_init_spi(spi, &tsc200x_regmap_config),
			     tsc2005_cmd);
 }

@@ -450,7 +450,7 @@ static void tsc200x_close(struct input_dev *input)
 	mutex_unlock(&ts->mutex);
 }
 
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
		  struct regmap *regmap,
		  int (*tsc200x_cmd)(struct device *dev, u8 cmd))
 {
@@ -547,9 +547,18 @@ int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
 	snprintf(ts->phys, sizeof(ts->phys),
		 "%s/input-ts", dev_name(dev));
 
-	input_dev->name = "TSC200X touchscreen";
+	if (tsc_id->product == 2004) {
+		input_dev->name = "TSC200X touchscreen";
+	} else {
+		input_dev->name = devm_kasprintf(dev, GFP_KERNEL,
+						 "TSC%04d touchscreen",
+						 tsc_id->product);
+		if (!input_dev->name)
+			return -ENOMEM;
+	}
+
 	input_dev->phys = ts->phys;
-	input_dev->id.bustype = bustype;
+	input_dev->id = *tsc_id;
 	input_dev->dev.parent = dev;
 	input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
 	input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);

@@ -70,7 +70,7 @@
 extern const struct regmap_config tsc200x_regmap_config;
 extern const struct dev_pm_ops tsc200x_pm_ops;
 
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
		  struct regmap *regmap,
		  int (*tsc200x_cmd)(struct device *dev, u8 cmd));
 int tsc200x_remove(struct device *dev);

@@ -27,7 +27,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@gmail.com>");
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 
-#define W8001_MAX_LENGTH	11
+#define W8001_MAX_LENGTH	13
 #define W8001_LEAD_MASK		0x80
 #define W8001_LEAD_BYTE		0x80
 #define W8001_TAB_MASK		0x40

@@ -1073,7 +1073,7 @@ static int airspy_probe(struct usb_interface *intf,
 	if (ret) {
 		dev_err(s->dev, "Failed to register as video device (%d)\n",
			ret);
-		goto err_unregister_v4l2_dev;
+		goto err_free_controls;
 	}
 	dev_info(s->dev, "Registered as %s\n",
		 video_device_node_name(&s->vdev));
@@ -1082,7 +1082,6 @@ static int airspy_probe(struct usb_interface *intf,
 
 err_free_controls:
 	v4l2_ctrl_handler_free(&s->hdl);
-err_unregister_v4l2_dev:
 	v4l2_device_unregister(&s->v4l2_dev);
 err_free_mem:
 	kfree(s);

@@ -1767,8 +1767,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
 
 	packed_cmd_hdr = packed->cmd_hdr;
 	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
-	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
-		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+	packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
+		(PACKED_CMD_WR << 8) | PACKED_CMD_VER);
 	hdr_blocks = mmc_large_sector(card) ? 8 : 1;
 
 	/*
@@ -1782,14 +1782,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
			((brq->data.blocks * brq->data.blksz) >=
			 card->ext_csd.data_tag_unit_size);
 		/* Argument of CMD23 */
-		packed_cmd_hdr[(i * 2)] =
+		packed_cmd_hdr[(i * 2)] = cpu_to_le32(
			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
-			blk_rq_sectors(prq);
+			blk_rq_sectors(prq));
 		/* Argument of CMD18 or CMD25 */
-		packed_cmd_hdr[((i * 2)) + 1] =
+		packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
			mmc_card_blockaddr(card) ?
-			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+			blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
 		packed->blocks += blk_rq_sectors(prq);
 		i++;
 	}

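Note: the packed command header is little-endian on the wire, so every word must pass through cpu_to_le32() before being handed to the controller; a plain store is only correct on little-endian hosts. A portable userspace sketch of the conversion (illustration, not the kernel helper):

	#include <stdint.h>

	static uint32_t cpu_to_le32_sketch(uint32_t v)
	{
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		return __builtin_bswap32(v);	/* swap on big-endian hosts */
	#else
		return v;			/* no-op on little-endian hosts */
	#endif
	}

	/* Pack the first header word the way the hunk above does. */
	static void pack_hdr(uint32_t *hdr, uint32_t nr_entries,
			     uint32_t cmd_wr, uint32_t ver)
	{
		hdr[0] = cpu_to_le32_sketch((nr_entries << 16) | (cmd_wr << 8) | ver);
	}
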
@@ -712,9 +712,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
 
 	/* upper group completed, look again in lower */
 	if (priv->rx_next > get_mb_rx_low_last(priv) &&
-	    quota > 0 && mb > get_mb_rx_last(priv)) {
+	    mb > get_mb_rx_last(priv)) {
 		priv->rx_next = get_mb_rx_first(priv);
-		goto again;
+		if (quota > 0)
+			goto again;
 	}
 
 	return received;

@@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
 
 	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
 
-	for (i = 0; i < frame->can_dlc; i += 2) {
-		priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
-				frame->data[i] | (frame->data[i + 1] << 8));
+	if (priv->type == BOSCH_D_CAN) {
+		u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+		for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+			data = (u32)frame->data[i];
+			data |= (u32)frame->data[i + 1] << 8;
+			data |= (u32)frame->data[i + 2] << 16;
+			data |= (u32)frame->data[i + 3] << 24;
+			priv->write_reg32(priv, dreg, data);
+		}
+	} else {
+		for (i = 0; i < frame->can_dlc; i += 2) {
+			priv->write_reg(priv,
+					C_CAN_IFACE(DATA1_REG, iface) + i / 2,
+					frame->data[i] |
+					(frame->data[i + 1] << 8));
+		}
 	}
 }
 
@@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
 	} else {
 		int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
 
-		for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
-			data = priv->read_reg(priv, dreg);
-			frame->data[i] = data;
-			frame->data[i + 1] = data >> 8;
+		if (priv->type == BOSCH_D_CAN) {
+			for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+				data = priv->read_reg32(priv, dreg);
+				frame->data[i] = data;
+				frame->data[i + 1] = data >> 8;
+				frame->data[i + 2] = data >> 16;
+				frame->data[i + 3] = data >> 24;
+			}
+		} else {
+			for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
+				data = priv->read_reg(priv, dreg);
+				frame->data[i] = data;
+				frame->data[i + 1] = data >> 8;
+			}
 		}
 	}
 
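Note: both hunks pack the CAN payload little-endian, low byte first, into the wider D_CAN data registers. A minimal standalone sketch of the byte packing (illustration only):

	#include <stdint.h>

	/* 32-bit D_CAN register image: byte 0 in bits 7:0, byte 3 in bits 31:24. */
	static uint32_t pack4(const uint8_t *data)
	{
		return (uint32_t)data[0]
		     | (uint32_t)data[1] << 8
		     | (uint32_t)data[2] << 16
		     | (uint32_t)data[3] << 24;
	}

	/* 16-bit C_CAN variant: two payload bytes per register write. */
	static uint16_t pack2(const uint8_t *data)
	{
		return (uint16_t)(data[0] | (data[1] << 8));
	}
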
@@ -798,6 +798,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
	 * - control mode with CAN_CTRLMODE_FD set
	 */
 
+	if (!data)
+		return 0;
+
 	if (data[IFLA_CAN_CTRLMODE]) {
 		struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
 
@@ -1008,6 +1011,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
 	return -EOPNOTSUPP;
 }
 
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+	return;
+}
+
 static struct rtnl_link_ops can_link_ops __read_mostly = {
 	.kind		= "can",
 	.maxtype	= IFLA_CAN_MAX,
@@ -1016,6 +1024,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
 	.validate	= can_validate,
 	.newlink	= can_newlink,
 	.changelink	= can_changelink,
+	.dellink	= can_dellink,
 	.get_size	= can_get_size,
 	.fill_info	= can_fill_info,
 	.get_xstats_size = can_get_xstats_size,

@@ -226,7 +226,7 @@
 /* Various constants */
 
 /* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS		1
+#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
 #define MVNETA_RX_COAL_PKTS		32
 #define MVNETA_RX_COAL_USEC		100
 

@@ -207,9 +207,9 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
 		pin_reg = &info->pin_regs[pin_id];
 
 		if (pin_reg->mux_reg == -1) {
-			dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
+			dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n",
				info->pins[pin_id].name);
-			return -EINVAL;
+			continue;
 		}
 
 		if (info->flags & SHARE_MUX_CONF_REG) {
@@ -1576,6 +1576,9 @@ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc,
 		else
 			mask &= ~soc_mask;
 		pcs->write(mask, pcswi->reg);
+
+		/* flush posted write */
+		mask = pcs->read(pcswi->reg);
 		raw_spin_unlock(&pcs->lock);
 	}
 
@@ -147,13 +147,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
 		goto exit;
 	}
 
+	if (u_cmd.outsize != s_cmd->outsize ||
+	    u_cmd.insize != s_cmd->insize) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
 	s_cmd->command += ec->cmd_offset;
 	ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
 	/* Only copy data to userland if data was received. */
 	if (ret < 0)
 		goto exit;
 
-	if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
+	if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
 		ret = -EFAULT;
 exit:
 	kfree(s_cmd);

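Note: the fix is a double-fetch hardening pattern: user-controlled sizes are re-validated against the first snapshot after the second copy from userspace, and the copy back out is bounded by the kernel-side size, so a racing writer cannot grow the copy window. A simplified sketch of the shape in plain C (hypothetical names, not the driver code):

	#include <errno.h>
	#include <stddef.h>
	#include <string.h>

	struct cmd { size_t outsize, insize; /* payload follows */ };

	static int handle(const struct cmd *first_snapshot, struct cmd *kernel_copy,
			  void *user_out, const void *reply, size_t reply_cap)
	{
		/* userspace changed the sizes between the two fetches */
		if (first_snapshot->outsize != kernel_copy->outsize ||
		    first_snapshot->insize != kernel_copy->insize)
			return -EINVAL;

		if (kernel_copy->insize > reply_cap)
			return -EINVAL;

		/* bounded by the checked kernel-side size, never the user one */
		memcpy(user_out, reply, kernel_copy->insize);
		return 0;
	}
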
@@ -565,11 +565,12 @@ static int power_supply_read_temp(struct thermal_zone_device *tzd,
 
 	WARN_ON(tzd == NULL);
 	psy = tzd->devdata;
-	ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+	ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+	if (ret)
+		return ret;
 
 	/* Convert tenths of degree Celsius to milli degree Celsius. */
-	if (!ret)
-		*temp = val.intval * 100;
+	*temp = val.intval * 100;
 
 	return ret;
 }
@@ -612,10 +613,12 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
 	int ret;
 
 	psy = tcd->devdata;
-	ret = psy->desc->get_property(psy,
-			POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
-	if (!ret)
-		*state = val.intval;
+	ret = power_supply_get_property(psy,
+			POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+	if (ret)
+		return ret;
+
+	*state = val.intval;
 
 	return ret;
 }
@@ -628,10 +631,12 @@ static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd,
 	int ret;
 
 	psy = tcd->devdata;
-	ret = psy->desc->get_property(psy,
-			POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
-	if (!ret)
-		*state = val.intval;
+	ret = power_supply_get_property(psy,
+			POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+	if (ret)
+		return ret;
+
+	*state = val.intval;
 
 	return ret;
 }

@@ -195,7 +195,7 @@ static void parport_detach(struct parport *port)
 	struct pps_client_pp *device;
 
 	/* FIXME: oooh, this is ugly! */
-	if (strcmp(pardev->name, KBUILD_MODNAME))
+	if (!pardev || strcmp(pardev->name, KBUILD_MODNAME))
 		/* not our port */
 		return;
 
@@ -1051,6 +1051,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
 		qeth_l2_set_offline(cgdev);
 
 	if (card->dev) {
+		netif_napi_del(&card->napi);
 		unregister_netdev(card->dev);
 		card->dev = NULL;
 	}

@@ -3246,6 +3246,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
 		qeth_l3_set_offline(cgdev);
 
 	if (card->dev) {
+		netif_napi_del(&card->napi);
 		unregister_netdev(card->dev);
 		card->dev = NULL;
 	}

@@ -10095,6 +10095,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 		ioa_cfg->intr_flag = IPR_USE_MSI;
 	else {
 		ioa_cfg->intr_flag = IPR_USE_LSI;
+		ioa_cfg->clear_isr = 1;
 		ioa_cfg->nvectors = 1;
 		dev_info(&pdev->dev, "Cannot enable MSI.\n");
 	}

@@ -426,7 +426,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
	 * here, and we don't know what device it is
	 * trying to work with, leave it as-is.
	 */
-	vmax = 8;	/* max length of vendor */
+	vmax = sizeof(devinfo->vendor);
 	vskip = vendor;
 	while (vmax > 0 && *vskip == ' ') {
 		vmax--;
@@ -436,7 +436,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
 	while (vmax > 0 && vskip[vmax - 1] == ' ')
 		--vmax;
 
-	mmax = 16;	/* max length of model */
+	mmax = sizeof(devinfo->model);
 	mskip = model;
 	while (mmax > 0 && *mskip == ' ') {
 		mmax--;
@@ -452,10 +452,12 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
			 * Behave like the older version of get_device_flags.
			 */
			if (memcmp(devinfo->vendor, vskip, vmax) ||
-					devinfo->vendor[vmax])
+					(vmax < sizeof(devinfo->vendor) &&
+						devinfo->vendor[vmax]))
				continue;
			if (memcmp(devinfo->model, mskip, mmax) ||
-					devinfo->model[mmax])
+					(mmax < sizeof(devinfo->model) &&
+						devinfo->model[mmax]))
				continue;
			return devinfo;
		} else {

@@ -170,13 +170,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
 {
 	struct sun4i_spi *sspi = spi_master_get_devdata(master);
 	unsigned int mclk_rate, div, timeout;
+	unsigned int start, end, tx_time;
 	unsigned int tx_len = 0;
 	int ret = 0;
 	u32 reg;
 
 	/* We don't support transfer larger than the FIFO */
 	if (tfr->len > SUN4I_FIFO_DEPTH)
-		return -EINVAL;
+		return -EMSGSIZE;
+
+	if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
+		return -EMSGSIZE;
 
 	reinit_completion(&sspi->done);
 	sspi->tx_buf = tfr->tx_buf;
@@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
 	sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
 	sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
 
-	/* Fill the TX FIFO */
-	sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+	/*
+	 * Fill the TX FIFO
+	 * Filling the FIFO fully causes timeout for some reason
+	 * at least on spi2 on A10s
+	 */
+	sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
 
 	/* Enable the interrupts */
 	sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
@@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
 	reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
 	sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
 
+	tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+	start = jiffies;
 	timeout = wait_for_completion_timeout(&sspi->done,
-					      msecs_to_jiffies(1000));
+					      msecs_to_jiffies(tx_time));
+	end = jiffies;
 	if (!timeout) {
+		dev_warn(&master->dev,
+			 "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+			 dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+			 jiffies_to_msecs(end - start), tx_time);
 		ret = -ETIMEDOUT;
 		goto out;
 	}

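Note: both sunxi SPI hunks replace a fixed 1000 ms wait with a timeout scaled to the transfer: the time needed for len bytes at speed_hz, doubled for slack, never below 100 ms. A standalone sketch of that expression (assumes speed_hz >= 1000, as the driver's expression does):

	/* Transfer-proportional timeout in milliseconds. */
	static unsigned int tx_time_ms(unsigned int len, unsigned int speed_hz)
	{
		unsigned int t = len * 8 * 2 / (speed_hz / 1000);

		return t > 100 ? t : 100;	/* floor of 100 ms */
	}
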
@@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
 {
 	struct sun6i_spi *sspi = spi_master_get_devdata(master);
 	unsigned int mclk_rate, div, timeout;
+	unsigned int start, end, tx_time;
 	unsigned int tx_len = 0;
 	int ret = 0;
 	u32 reg;
@@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
 	reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
 	sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
 
+	tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+	start = jiffies;
 	timeout = wait_for_completion_timeout(&sspi->done,
-					      msecs_to_jiffies(1000));
+					      msecs_to_jiffies(tx_time));
+	end = jiffies;
 	if (!timeout) {
+		dev_warn(&master->dev,
+			 "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+			 dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+			 jiffies_to_msecs(end - start), tx_time);
 		ret = -ETIMEDOUT;
 		goto out;
 	}

@@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
 {
 	int	branch;
 
-	ed->state = ED_OPER;
 	ed->ed_prev = NULL;
 	ed->ed_next = NULL;
 	ed->hwNextED = 0;
@@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
 	/* the HC may not see the schedule updates yet, but if it does
	 * then they'll be properly ordered.
	 */
+
+	ed->state = ED_OPER;
 	return 0;
 }
 
@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
 		field_start = OFFSET(cfg_entry);
 		field_end = OFFSET(cfg_entry) + field->size;
 
-		if ((req_start >= field_start && req_start < field_end)
-		    || (req_end > field_start && req_end <= field_end)) {
+		if (req_end > field_start && field_end > req_start) {
 			err = conf_space_read(dev, cfg_entry, field_start,
					      &tmp_val);
 			if (err)
@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
 		field_start = OFFSET(cfg_entry);
 		field_end = OFFSET(cfg_entry) + field->size;
 
-		if ((req_start >= field_start && req_start < field_end)
-		    || (req_end > field_start && req_end <= field_end)) {
+		if (req_end > field_start && field_end > req_start) {
 			tmp_val = 0;
 
 			err = xen_pcibk_config_read(dev, field_start,

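Note: the rewritten condition is the standard interval-overlap test. Two half-open ranges [a_start, a_end) and [b_start, b_end) overlap iff each starts before the other ends; the old two-clause check missed requests that completely straddle a field. Minimal illustration:

	#include <stdbool.h>

	/* True iff [a_start, a_end) and [b_start, b_end) share any point. */
	static bool ranges_overlap(int a_start, int a_end, int b_start, int b_end)
	{
		return a_end > b_start && b_end > a_start;
	}
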
@@ -316,11 +316,18 @@ static int xenbus_write_transaction(unsigned msg_type,
			rc = -ENOMEM;
			goto out;
 		}
+	} else {
+		list_for_each_entry(trans, &u->transactions, list)
+			if (trans->handle.id == u->u.msg.tx_id)
+				break;
+		if (&trans->list == &u->transactions)
+			return -ESRCH;
 	}
 
 	reply = xenbus_dev_request_and_reply(&u->u.msg);
 	if (IS_ERR(reply)) {
-		kfree(trans);
+		if (msg_type == XS_TRANSACTION_START)
+			kfree(trans);
 		rc = PTR_ERR(reply);
 		goto out;
 	}
@@ -333,12 +340,7 @@ static int xenbus_write_transaction(unsigned msg_type,
			list_add(&trans->list, &u->transactions);
 		}
 	} else if (u->u.msg.type == XS_TRANSACTION_END) {
-		list_for_each_entry(trans, &u->transactions, list)
-			if (trans->handle.id == u->u.msg.tx_id)
-				break;
-		BUG_ON(&trans->list == &u->transactions);
 		list_del(&trans->list);
-
 		kfree(trans);
 	}
 

@@ -250,9 +250,6 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
 
 	mutex_unlock(&xs_state.request_mutex);
 
-	if (IS_ERR(ret))
-		return ret;
-
 	if ((msg->type == XS_TRANSACTION_END) ||
	    ((req_msg.type == XS_TRANSACTION_START) &&
	     (msg->type == XS_ERROR)))

@@ -74,7 +74,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
					v9fs_proto_dotu(v9ses));
 	fid = file->private_data;
 	if (!fid) {
-		fid = v9fs_fid_clone(file->f_path.dentry);
+		fid = v9fs_fid_clone(file_dentry(file));
 		if (IS_ERR(fid))
 			return PTR_ERR(fid);
 
@@ -100,7 +100,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
		 * because we want write after unlink usecase
		 * to work.
		 */
-		fid = v9fs_writeback_fid(file->f_path.dentry);
+		fid = v9fs_writeback_fid(file_dentry(file));
 		if (IS_ERR(fid)) {
 			err = PTR_ERR(fid);
 			mutex_unlock(&v9inode->v_mutex);
@@ -516,7 +516,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
	 * because we want write after unlink usecase
	 * to work.
	 */
-	fid = v9fs_writeback_fid(filp->f_path.dentry);
+	fid = v9fs_writeback_fid(file_dentry(filp));
 	if (IS_ERR(fid)) {
 		retval = PTR_ERR(fid);
 		mutex_unlock(&v9inode->v_mutex);

@@ -170,6 +170,19 @@ out:
 	return rc;
 }
 
+static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct file *lower_file = ecryptfs_file_to_lower(file);
+	/*
+	 * Don't allow mmap on top of file systems that don't support it
+	 * natively.  If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
+	 * allows recursive mounting, this will need to be extended.
+	 */
+	if (!lower_file->f_op->mmap)
+		return -ENODEV;
+	return generic_file_mmap(file, vma);
+}
+
 /**
  * ecryptfs_open
  * @inode: inode speciying file to open
@@ -364,7 +377,7 @@ const struct file_operations ecryptfs_main_fops = {
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = ecryptfs_compat_ioctl,
 #endif
-	.mmap = generic_file_mmap,
+	.mmap = ecryptfs_mmap,
 	.open = ecryptfs_open,
 	.flush = ecryptfs_flush,
 	.release = ecryptfs_release,

@@ -25,7 +25,6 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 #include <linux/mount.h>
-#include <linux/file.h>
 #include "ecryptfs_kernel.h"
 
 struct ecryptfs_open_req {
@@ -148,7 +147,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
 	flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
 	(*lower_file) = dentry_open(&req.path, flags, cred);
 	if (!IS_ERR(*lower_file))
-		goto have_file;
+		goto out;
 	if ((flags & O_ACCMODE) == O_RDONLY) {
 		rc = PTR_ERR((*lower_file));
 		goto out;
@@ -166,16 +165,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
 	mutex_unlock(&ecryptfs_kthread_ctl.mux);
 	wake_up(&ecryptfs_kthread_ctl.wait);
 	wait_for_completion(&req.done);
-	if (IS_ERR(*lower_file)) {
+	if (IS_ERR(*lower_file))
 		rc = PTR_ERR(*lower_file);
-		goto out;
-	}
-have_file:
-	if ((*lower_file)->f_op->mmap == NULL) {
-		fput(*lower_file);
-		*lower_file = NULL;
-		rc = -EMEDIUMTYPE;
-	}
 out:
 	return rc;
 }

@@ -469,6 +469,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
 		error_msg = "invalid extent entries";
 		goto corrupted;
 	}
+	if (unlikely(depth > 32)) {
+		error_msg = "too large eh_depth";
+		goto corrupted;
+	}
 	/* Verify checksum on non-root extent tree nodes */
 	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {

@@ -1733,8 +1733,8 @@ static int __remove_privs(struct dentry *dentry, int kill)
  */
 int file_remove_privs(struct file *file)
 {
-	struct dentry *dentry = file->f_path.dentry;
-	struct inode *inode = d_inode(dentry);
+	struct dentry *dentry = file_dentry(file);
+	struct inode *inode = file_inode(file);
 	int kill;
 	int error = 0;
 
@@ -1742,7 +1742,7 @@ int file_remove_privs(struct file *file)
 	if (IS_NOSEC(inode))
 		return 0;
 
-	kill = file_needs_remove_privs(file);
+	kill = dentry_needs_remove_privs(dentry);
 	if (kill < 0)
 		return kill;
 	if (kill)

@@ -1602,7 +1602,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
 {
 	struct file_lock *fl, *my_fl = NULL, *lease;
 	struct dentry *dentry = filp->f_path.dentry;
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode = file_inode(filp);
 	struct file_lock_context *ctx;
 	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
 	int error;

@@ -1562,6 +1562,7 @@ void __detach_mounts(struct dentry *dentry)
 		goto out_unlock;
 
 	lock_mount_hash();
+	event++;
 	while (!hlist_empty(&mp->m_list)) {
 		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
 		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {

@@ -443,7 +443,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
 	if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
 		return 0;
 	bytes = le16_to_cpu(sbp->s_bytes);
-	if (bytes > BLOCK_SIZE)
+	if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
 		return 0;
 	crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
		       sumoff);

@@ -63,6 +63,9 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
 	if (!err) {
 		upperdentry = ovl_dentry_upper(dentry);
 
+		if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
+			attr->ia_valid &= ~ATTR_MODE;
+
 		mutex_lock(&upperdentry->d_inode->i_mutex);
 		err = notify_change(upperdentry, attr, NULL);
 		if (!err)

@@ -532,15 +532,19 @@
 
 #define INIT_TEXT							\
	*(.init.text)							\
+	*(.text.startup)						\
	MEM_DISCARD(init.text)
 
 #define EXIT_DATA							\
	*(.exit.data)							\
+	*(.fini_array)							\
+	*(.dtors)							\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)
 
 #define EXIT_TEXT							\
	*(.exit.text)							\
+	*(.text.exit)							\
	MEM_DISCARD(exit.text)
 
 #define EXIT_CALL							\

@@ -156,6 +156,7 @@ struct bcma_host_ops {
 #define BCMA_CORE_DEFAULT		0xFFF
 
 #define BCMA_MAX_NR_CORES		16
+#define BCMA_CORE_SIZE			0x1000
 
 /* Chip IDs of PCIe devices */
 #define BCMA_CHIP_ID_BCM4313	0x4313

@@ -4793,6 +4793,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
 	memset(css, 0, sizeof(*css));
 	css->cgroup = cgrp;
 	css->ss = ss;
+	css->id = -1;
 	INIT_LIST_HEAD(&css->sibling);
 	INIT_LIST_HEAD(&css->children);
 	css->serial_nr = css_serial_nr_next++;

@@ -4951,14 +4951,16 @@ void show_state_filter(unsigned long state_filter)
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take a lot of time:
+		 * Also, reset softlockup watchdogs on all CPUs, because
+		 * another CPU might be blocked waiting for us to process
+		 * an IPI.
		 */
		touch_nmi_watchdog();
+		touch_all_softlockup_watchdogs();
		if (!state_filter || (p->state & state_filter))
			sched_show_task(p);
 	}
 
-	touch_all_softlockup_watchdogs();
-
 #ifdef CONFIG_SCHED_DEBUG
 	sysrq_sched_debug_show();
 #endif

@@ -687,8 +687,6 @@ void init_entity_runnable_average(struct sched_entity *se)
 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -4594,19 +4592,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long w, W;
+		struct cfs_rq *cfs_rq = se->my_q;
+		long W, w = cfs_rq_load_avg(cfs_rq);
 
-		tg = se->my_q->tg;
+		tg = cfs_rq->tg;
 
		/*
		 * W = @wg + \Sum rw_j
		 */
-		W = wg + calc_tg_weight(tg, se->my_q);
+		W = wg + atomic_long_read(&tg->load_avg);
+
+		/* Ensure \Sum rw_j >= rw_i */
+		W -= cfs_rq->tg_load_avg_contrib;
+		W += w;
 
		/*
		 * w = rw_i + @wl
		 */
-		w = cfs_rq_load_avg(se->my_q) + wl;
+		w += wl;
 
		/*
		 * wl = S * s'_i; see (2)

@@ -808,6 +808,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
			timer->it.cpu.expires = 0;
			sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
					   &itp->it_value);
+			return;
 		} else {
 			cpu_timer_sample_group(timer->it_clock, p, &now);
 			unlock_task_sighand(p, &flags);

@@ -475,25 +475,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
 		/* Found a free page, break it into order-0 pages */
 		isolated = split_free_page(page);
+		if (!isolated)
+			break;
+
 		total_isolated += isolated;
+		cc->nr_freepages += isolated;
 		for (i = 0; i < isolated; i++) {
 			list_add(&page->lru, freelist);
 			page++;
 		}
-
-		/* If a page was split, advance to the end of it */
-		if (isolated) {
-			cc->nr_freepages += isolated;
-			if (!strict &&
-				cc->nr_migratepages <= cc->nr_freepages) {
-				blockpfn += isolated;
-				break;
-			}
-
-			blockpfn += isolated - 1;
-			cursor += isolated - 1;
-			continue;
+		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
+			blockpfn += isolated;
+			break;
 		}
+		/* Advance to the end of split page */
+		blockpfn += isolated - 1;
+		cursor += isolated - 1;
+		continue;
 
 isolate_fail:
 		if (strict)
@@ -503,6 +501,9 @@ isolate_fail:
 
 	}
 
+	if (locked)
+		spin_unlock_irqrestore(&cc->zone->lock, flags);
+
	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
@@ -524,9 +525,6 @@ isolate_fail:
 	if (strict && blockpfn < end_pfn)
 		total_isolated = 0;
 
-	if (locked)
-		spin_unlock_irqrestore(&cc->zone->lock, flags);
-
 	/* Update the pageblock-skip if the whole pageblock was scanned */
 	if (blockpfn == end_pfn)
 		update_pageblock_skip(cc, valid_page, total_isolated, false);
@@ -966,7 +964,6 @@ static void isolate_freepages(struct compact_control *cc)
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
-
		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
@@ -990,32 +987,30 @@ static void isolate_freepages(struct compact_control *cc)
			continue;
 
		/* Found a block suitable for isolating free pages from. */
-		isolate_freepages_block(cc, &isolate_start_pfn,
-					block_end_pfn, freelist, false);
+		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+					freelist, false);
 
		/*
-		 * If we isolated enough freepages, or aborted due to async
-		 * compaction being contended, terminate the loop.
-		 * Remember where the free scanner should restart next time,
-		 * which is where isolate_freepages_block() left off.
-		 * But if it scanned the whole pageblock, isolate_start_pfn
-		 * now points at block_end_pfn, which is the start of the next
-		 * pageblock.
-		 * In that case we will however want to restart at the start
-		 * of the previous pageblock.
+		 * If we isolated enough freepages, or aborted due to lock
+		 * contention, terminate.
		 */
		if ((cc->nr_freepages >= cc->nr_migratepages)
							|| cc->contended) {
-			if (isolate_start_pfn >= block_end_pfn)
+			if (isolate_start_pfn >= block_end_pfn) {
+				/*
+				 * Restart at previous pageblock if more
+				 * freepages can be isolated next time.
+				 */
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
+			}
			break;
-		} else {
+		} else if (isolate_start_pfn < block_end_pfn) {
			/*
-			 * isolate_freepages_block() should not terminate
-			 * prematurely unless contended, or isolated enough
+			 * If isolation failed early, do not continue
+			 * needlessly.
			 */
-			VM_BUG_ON(isolate_start_pfn < block_end_pfn);
			break;
		}
 	}
 
@@ -22,7 +22,8 @@
  */
 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
-			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
+			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
+			__GFP_ATOMIC)
 
 /* The GFP flags allowed during early boot */
 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

mm/page_alloc.c
@@ -275,7 +275,9 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
 /* Returns true if the struct page for the pfn is uninitialised */
 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
 {
-	if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
+	int nid = early_pfn_to_nid(pfn);
+
+	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
 		return true;
 
 	return false;
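
Note: the added node_online() check matters because NODE_DATA() for an offline node may not be populated during early meminit, so the old one-liner could dereference garbage. A self-contained sketch of the guard pattern, using stand-in types rather than the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    struct pgdat { unsigned long first_deferred_pfn; };

    static struct pgdat *node_data[4];  /* offline nodes stay NULL */

    static bool node_online(int nid) { return node_data[nid] != NULL; }

    static bool early_page_uninitialised(unsigned long pfn, int nid)
    {
        /* without node_online() this would dereference NULL for node 1 */
        if (node_online(nid) && pfn >= node_data[nid]->first_deferred_pfn)
            return true;
        return false;
    }

    int main(void)
    {
        struct pgdat node0 = { .first_deferred_pfn = 1000 };

        node_data[0] = &node0;          /* node 1 is left offline */
        printf("node0: %d, node1: %d\n",
               early_page_uninitialised(2000, 0),
               early_page_uninitialised(2000, 1));
        return 0;
    }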
@@ -1057,7 +1059,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
 	spin_lock(&early_pfn_lock);
 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
 	if (nid < 0)
-		nid = 0;
+		nid = first_online_node;
 	spin_unlock(&early_pfn_lock);
 
 	return nid;
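
Note: falling back to first_online_node instead of a hardcoded 0 matters on machines where node 0 is memoryless or offline. first_online_node is essentially the lowest set bit of the online-node mask; a tiny sketch with a made-up mask value:

    #include <stdio.h>
    #include <strings.h>  /* ffs() */

    int main(void)
    {
        unsigned online_mask = 0x6;  /* nodes 1 and 2 online, node 0 offline */
        int first_online_node = ffs(online_mask) - 1;

        printf("fallback nid: %d (was hardcoded 0)\n", first_online_node);
        return 0;
    }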
net/ceph/osdmap.c
@@ -1191,6 +1191,115 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
 	return map;
 }
 
+/*
+ * Encoding order is (new_up_client, new_state, new_weight).  Need to
+ * apply in the (new_weight, new_state, new_up_client) order, because
+ * an incremental map may look like e.g.
+ *
+ *     new_up_client: { osd=6, addr=... } # set osd_state and addr
+ *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
+ */
+static int decode_new_up_state_weight(void **p, void *end,
+				      struct ceph_osdmap *map)
+{
+	void *new_up_client;
+	void *new_state;
+	void *new_weight_end;
+	u32 len;
+
+	new_up_client = *p;
+	ceph_decode_32_safe(p, end, len, e_inval);
+	len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
+	ceph_decode_need(p, end, len, e_inval);
+	*p += len;
+
+	new_state = *p;
+	ceph_decode_32_safe(p, end, len, e_inval);
+	len *= sizeof(u32) + sizeof(u8);
+	ceph_decode_need(p, end, len, e_inval);
+	*p += len;
+
+	/* new_weight */
+	ceph_decode_32_safe(p, end, len, e_inval);
+	while (len--) {
+		s32 osd;
+		u32 w;
+
+		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
+		osd = ceph_decode_32(p);
+		w = ceph_decode_32(p);
+		BUG_ON(osd >= map->max_osd);
+		pr_info("osd%d weight 0x%x %s\n", osd, w,
+		     w == CEPH_OSD_IN ? "(in)" :
+		     (w == CEPH_OSD_OUT ? "(out)" : ""));
+		map->osd_weight[osd] = w;
+
+		/*
+		 * If we are marking in, set the EXISTS, and clear the
+		 * AUTOOUT and NEW bits.
+		 */
+		if (w) {
+			map->osd_state[osd] |= CEPH_OSD_EXISTS;
+			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
+						 CEPH_OSD_NEW);
+		}
+	}
+	new_weight_end = *p;
+
+	/* new_state (up/down) */
+	*p = new_state;
+	len = ceph_decode_32(p);
+	while (len--) {
+		s32 osd;
+		u8 xorstate;
+		int ret;
+
+		osd = ceph_decode_32(p);
+		xorstate = ceph_decode_8(p);
+		if (xorstate == 0)
+			xorstate = CEPH_OSD_UP;
+		BUG_ON(osd >= map->max_osd);
+		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
+		    (xorstate & CEPH_OSD_UP))
+			pr_info("osd%d down\n", osd);
+		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
+		    (xorstate & CEPH_OSD_EXISTS)) {
+			pr_info("osd%d does not exist\n", osd);
+			map->osd_weight[osd] = CEPH_OSD_IN;
+			ret = set_primary_affinity(map, osd,
+				   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
+			if (ret)
+				return ret;
+			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
+			map->osd_state[osd] = 0;
+		} else {
+			map->osd_state[osd] ^= xorstate;
+		}
+	}
+
+	/* new_up_client */
+	*p = new_up_client;
+	len = ceph_decode_32(p);
+	while (len--) {
+		s32 osd;
+		struct ceph_entity_addr addr;
+
+		osd = ceph_decode_32(p);
+		ceph_decode_copy(p, &addr, sizeof(addr));
+		ceph_decode_addr(&addr);
+		BUG_ON(osd >= map->max_osd);
+		pr_info("osd%d up\n", osd);
+		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
+		map->osd_addr[osd] = addr;
+	}
+
+	*p = new_weight_end;
+	return 0;
+
+e_inval:
+	return -EINVAL;
+}
+
 /*
  * decode and apply an incremental map update.
  */
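
Note: the comment at the top of decode_new_up_state_weight() is the heart of this fix. The wire encoding emits new_up_client before new_state, but applying them in that order mishandles an osd that was destroyed and re-created within one incremental. A userspace replay of the comment's osd6 example in both orders, with local stand-ins for the CEPH_OSD_* flags (values illustrative only):

    #include <stdio.h>

    #define OSD_EXISTS 0x1u
    #define OSD_UP     0x2u

    int main(void)
    {
        unsigned state;

        /* old behaviour: apply in encoding order */
        state = OSD_EXISTS | OSD_UP;   /* osd6 before the incremental */
        state |= OSD_EXISTS | OSD_UP;  /* new_up_client: re-created osd6 */
        state ^= OSD_EXISTS;           /* new_state: old osd6 destroyed */
        printf("encoding order: %#x (up, but EXISTS wrongly cleared)\n", state);

        /* fixed behaviour: new_state first, then new_up_client */
        state = OSD_EXISTS | OSD_UP;
        state = 0;                     /* new_state: EXISTS xor resets the osd */
        state |= OSD_EXISTS | OSD_UP;  /* new_up_client applied last */
        printf("apply order:    %#x (exists and up)\n", state);
        return 0;
    }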
@@ -1290,49 +1399,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 			__remove_pg_pool(&map->pg_pools, pi);
 	}
 
-	/* new_up */
-	ceph_decode_32_safe(p, end, len, e_inval);
-	while (len--) {
-		u32 osd;
-		struct ceph_entity_addr addr;
-		ceph_decode_32_safe(p, end, osd, e_inval);
-		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
-		ceph_decode_addr(&addr);
-		pr_info("osd%d up\n", osd);
-		BUG_ON(osd >= map->max_osd);
-		map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS;
-		map->osd_addr[osd] = addr;
-	}
-
-	/* new_state */
-	ceph_decode_32_safe(p, end, len, e_inval);
-	while (len--) {
-		u32 osd;
-		u8 xorstate;
-		ceph_decode_32_safe(p, end, osd, e_inval);
-		xorstate = **(u8 **)p;
-		(*p)++;  /* clean flag */
-		if (xorstate == 0)
-			xorstate = CEPH_OSD_UP;
-		if (xorstate & CEPH_OSD_UP)
-			pr_info("osd%d down\n", osd);
-		if (osd < map->max_osd)
-			map->osd_state[osd] ^= xorstate;
-	}
-
-	/* new_weight */
-	ceph_decode_32_safe(p, end, len, e_inval);
-	while (len--) {
-		u32 osd, off;
-		ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
-		osd = ceph_decode_32(p);
-		off = ceph_decode_32(p);
-		pr_info("osd%d weight 0x%x %s\n", osd, off,
-		     off == CEPH_OSD_IN ? "(in)" :
-		     (off == CEPH_OSD_OUT ? "(out)" : ""));
-		if (osd < map->max_osd)
-			map->osd_weight[osd] = off;
-	}
+	/* new_up_client, new_state, new_weight */
+	err = decode_new_up_state_weight(p, end, map);
+	if (err)
+		goto bad;
 
 	/* new_pg_temp */
 	err = decode_new_pg_temp(p, end, map);
net/rds/tcp.c
@@ -421,7 +421,7 @@ static int rds_tcp_init(void)
 
 	ret = rds_tcp_recv_init();
 	if (ret)
-		goto out_slab;
+		goto out_pernet;
 
 	ret = rds_trans_register(&rds_tcp_transport);
 	if (ret)
@@ -433,8 +433,9 @@ static int rds_tcp_init(void)
 
 out_recv:
 	rds_tcp_recv_exit();
-out_slab:
+out_pernet:
 	unregister_pernet_subsys(&rds_tcp_net_ops);
+out_slab:
 	kmem_cache_destroy(rds_tcp_conn_slab);
 out:
 	return ret;
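
Note: the relabelled goto targets restore the usual rule for kernel init functions: the error path must tear down in exact reverse order of setup. Previously a rds_tcp_recv_init() failure jumped past unregister_pernet_subsys(), leaking the pernet registration. A generic sketch of the pattern, with stand-in init/undo steps (not the RDS functions):

    #include <stdio.h>

    static int  step_a(void) { puts("a: init"); return 0; }
    static void undo_a(void) { puts("a: undo"); }
    static int  step_b(void) { puts("b: init"); return 0; }
    static void undo_b(void) { puts("b: undo"); }
    static int  step_c(void) { puts("c: init"); return -1; /* fails */ }

    static int init_sketch(void)
    {
        int ret;

        ret = step_a();
        if (ret)
            goto out;
        ret = step_b();
        if (ret)
            goto out_a;
        ret = step_c();
        if (ret)
            goto out_b;  /* unwind b, then a: reverse order of setup */
        return 0;

    out_b:
        undo_b();
    out_a:
        undo_a();
    out:
        return ret;
    }

    int main(void) { return init_sketch() ? 1 : 0; }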
sound/core/timer.c
@@ -1247,6 +1247,7 @@ static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
 	tu->tstamp = *tstamp;
 	if ((tu->filter & (1 << event)) == 0 || !tu->tread)
 		return;
+	memset(&r1, 0, sizeof(r1));
 	r1.event = event;
 	r1.tstamp = *tstamp;
 	r1.val = resolution;
@@ -1281,6 +1282,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
 	}
 	if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
 	    tu->last_resolution != resolution) {
+		memset(&r1, 0, sizeof(r1));
 		r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
 		r1.tstamp = tstamp;
 		r1.val = resolution;
@@ -1746,6 +1748,7 @@ static int snd_timer_user_params(struct file *file,
 	if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
 		if (tu->tread) {
 			struct snd_timer_tread tread;
+			memset(&tread, 0, sizeof(tread));
 			tread.event = SNDRV_TIMER_EVENT_EARLY;
 			tread.tstamp.tv_sec = 0;
 			tread.tstamp.tv_nsec = 0;