Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android

Conflicts:
	d_canonical_path in include/linux/dcache.h
commit bab1564182
Author: Alex Shi
Date:   2016-04-21 14:08:44 +08:00

153 changed files with 1339 additions and 599 deletions


@ -134,12 +134,12 @@ mfio80 ddr_debug, mips_trace_data, mips_debug
mfio81 dreq0, mips_trace_data, eth_debug
mfio82 dreq1, mips_trace_data, eth_debug
mfio83 mips_pll_lock, mips_trace_data, usb_debug
mfio84 sys_pll_lock, mips_trace_data, usb_debug
mfio85 wifi_pll_lock, mips_trace_data, sdhost_debug
mfio86 bt_pll_lock, mips_trace_data, sdhost_debug
mfio87 rpu_v_pll_lock, dreq2, socif_debug
mfio88 rpu_l_pll_lock, dreq3, socif_debug
mfio89 audio_pll_lock, dreq4, dreq5
mfio84 audio_pll_lock, mips_trace_data, usb_debug
mfio85 rpu_v_pll_lock, mips_trace_data, sdhost_debug
mfio86 rpu_l_pll_lock, mips_trace_data, sdhost_debug
mfio87 sys_pll_lock, dreq2, socif_debug
mfio88 wifi_pll_lock, dreq3, socif_debug
mfio89 bt_pll_lock, dreq4, dreq5
tck
trstn
tdi


@ -3932,6 +3932,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
sector if the number is odd);
i = IGNORE_DEVICE (don't bind to this
device);
j = NO_REPORT_LUNS (don't use report luns
command, uas only);
l = NOT_LOCKABLE (don't try to lock and
unlock ejectable media);
m = MAX_SECTORS_64 (don't transfer more


@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 7
SUBLEVEL = 8
EXTRAVERSION =
NAME = Blurry Fish Butt


@ -1 +1,5 @@
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
#endif
#include <../../arm/include/asm/opcodes.h>


@ -186,20 +186,21 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
/* EL1 Single Step Handler hooks */
static LIST_HEAD(step_hook);
static DEFINE_RWLOCK(step_hook_lock);
static DEFINE_SPINLOCK(step_hook_lock);
void register_step_hook(struct step_hook *hook)
{
write_lock(&step_hook_lock);
list_add(&hook->node, &step_hook);
write_unlock(&step_hook_lock);
spin_lock(&step_hook_lock);
list_add_rcu(&hook->node, &step_hook);
spin_unlock(&step_hook_lock);
}
void unregister_step_hook(struct step_hook *hook)
{
write_lock(&step_hook_lock);
list_del(&hook->node);
write_unlock(&step_hook_lock);
spin_lock(&step_hook_lock);
list_del_rcu(&hook->node);
spin_unlock(&step_hook_lock);
synchronize_rcu();
}
/*
@ -213,15 +214,15 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
struct step_hook *hook;
int retval = DBG_HOOK_ERROR;
read_lock(&step_hook_lock);
rcu_read_lock();
list_for_each_entry(hook, &step_hook, node) {
list_for_each_entry_rcu(hook, &step_hook, node) {
retval = hook->fn(regs, esr);
if (retval == DBG_HOOK_HANDLED)
break;
}
read_unlock(&step_hook_lock);
rcu_read_unlock();
return retval;
}


@ -503,15 +503,15 @@ int __init db1000_dev_setup(void)
if (board == BCSR_WHOAMI_DB1500) {
c0 = AU1500_GPIO2_INT;
c1 = AU1500_GPIO5_INT;
d0 = AU1500_GPIO0_INT;
d1 = AU1500_GPIO3_INT;
d0 = 0; /* GPIO number, NOT irq! */
d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1500_GPIO1_INT;
s1 = AU1500_GPIO4_INT;
} else if (board == BCSR_WHOAMI_DB1100) {
c0 = AU1100_GPIO2_INT;
c1 = AU1100_GPIO5_INT;
d0 = AU1100_GPIO0_INT;
d1 = AU1100_GPIO3_INT;
d0 = 0; /* GPIO number, NOT irq! */
d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1100_GPIO1_INT;
s1 = AU1100_GPIO4_INT;
@ -545,15 +545,15 @@ int __init db1000_dev_setup(void)
} else if (board == BCSR_WHOAMI_DB1000) {
c0 = AU1000_GPIO2_INT;
c1 = AU1000_GPIO5_INT;
d0 = AU1000_GPIO0_INT;
d1 = AU1000_GPIO3_INT;
d0 = 0; /* GPIO number, NOT irq! */
d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1000_GPIO1_INT;
s1 = AU1000_GPIO4_INT;
platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs));
} else if ((board == BCSR_WHOAMI_PB1500) ||
(board == BCSR_WHOAMI_PB1500R2)) {
c0 = AU1500_GPIO203_INT;
d0 = AU1500_GPIO201_INT;
d0 = 1; /* GPIO number, NOT irq! */
s0 = AU1500_GPIO202_INT;
twosocks = 0;
flashsize = 64;
@ -566,7 +566,7 @@ int __init db1000_dev_setup(void)
*/
} else if (board == BCSR_WHOAMI_PB1100) {
c0 = AU1100_GPIO11_INT;
d0 = AU1100_GPIO9_INT;
d0 = 9; /* GPIO number, NOT irq! */
s0 = AU1100_GPIO10_INT;
twosocks = 0;
flashsize = 64;
@ -583,7 +583,6 @@ int __init db1000_dev_setup(void)
} else
return 0; /* unknown board, no further dev setup to do */
irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH);
irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
@ -597,7 +596,6 @@ int __init db1000_dev_setup(void)
c0, d0, /*s0*/0, 0, 0);
if (twosocks) {
irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);


@ -514,7 +514,7 @@ static void __init db1550_devices(void)
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
AU1550_GPIO3_INT, AU1550_GPIO0_INT,
AU1550_GPIO3_INT, 0,
/*AU1550_GPIO21_INT*/0, 0, 0);
db1x_register_pcmcia_socket(
@ -524,7 +524,7 @@ static void __init db1550_devices(void)
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
AU1550_GPIO5_INT, AU1550_GPIO1_INT,
AU1550_GPIO5_INT, 1,
/*AU1550_GPIO22_INT*/0, 0, 1);
platform_device_register(&db1550_nand_dev);


@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
{
union mips_instruction insn;
unsigned long value;
unsigned int res;
unsigned int res, preempted;
unsigned long origpc;
unsigned long orig31;
void __user *fault_addr = NULL;
@ -1226,27 +1226,36 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
goto sigbus;
/*
* Disable preemption to avoid a race between copying
* state from userland, migrating to another CPU and
* updating the hardware vector register below.
*/
preempt_disable();
do {
/*
* If we have live MSA context keep track of
* whether we get preempted in order to avoid
* the register context we load being clobbered
* by the live context as it's saved during
* preemption. If we don't have live context
* then it can't be saved to clobber the value
* we load.
*/
preempted = test_thread_flag(TIF_USEDMSA);
res = __copy_from_user_inatomic(fpr, addr,
sizeof(*fpr));
if (res)
goto fault;
res = __copy_from_user_inatomic(fpr, addr,
sizeof(*fpr));
if (res)
goto fault;
/*
* Update the hardware register if it is in use by the
* task in this quantum, in order to avoid having to
* save & restore the whole vector context.
*/
if (test_thread_flag(TIF_USEDMSA))
write_msa_wr(wd, fpr, df);
preempt_enable();
/*
* Update the hardware register if it is in use
* by the task in this quantum, in order to
* avoid having to save & restore the whole
* vector context.
*/
preempt_disable();
if (test_thread_flag(TIF_USEDMSA)) {
write_msa_wr(wd, fpr, df);
preempted = 0;
}
preempt_enable();
} while (preempted);
break;
case msa_st_op:


@ -76,6 +76,7 @@ struct exception_table_entry {
*/
struct exception_data {
unsigned long fault_ip;
unsigned long fault_gp;
unsigned long fault_space;
unsigned long fault_addr;
};


@ -299,6 +299,7 @@ int main(void)
#endif
BLANK();
DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp));
DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
BLANK();


@ -47,11 +47,11 @@ EXPORT_SYMBOL(__cmpxchg_u64);
EXPORT_SYMBOL(lclear_user);
EXPORT_SYMBOL(lstrnlen_user);
/* Global fixups */
extern void fixup_get_user_skip_1(void);
extern void fixup_get_user_skip_2(void);
extern void fixup_put_user_skip_1(void);
extern void fixup_put_user_skip_2(void);
/* Global fixups - defined as int to avoid creation of function pointers */
extern int fixup_get_user_skip_1;
extern int fixup_get_user_skip_2;
extern int fixup_put_user_skip_1;
extern int fixup_put_user_skip_2;
EXPORT_SYMBOL(fixup_get_user_skip_1);
EXPORT_SYMBOL(fixup_get_user_skip_2);
EXPORT_SYMBOL(fixup_put_user_skip_1);


@ -798,6 +798,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
if (fault_space == 0 && !faulthandler_disabled())
{
/* Clean up and return if in exception table. */
if (fixup_exception(regs))
return;
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Kernel Fault", regs, code, fault_address);
}


@ -26,6 +26,7 @@
#ifdef CONFIG_SMP
.macro get_fault_ip t1 t2
loadgp
addil LT%__per_cpu_offset,%r27
LDREG RT%__per_cpu_offset(%r1),\t1
/* t2 = smp_processor_id() */
@ -40,14 +41,19 @@
LDREG RT%exception_data(%r1),\t1
/* t1 = this_cpu_ptr(&exception_data) */
add,l \t1,\t2,\t1
/* %r27 = t1->fault_gp - restore gp */
LDREG EXCDATA_GP(\t1), %r27
/* t1 = t1->fault_ip */
LDREG EXCDATA_IP(\t1), \t1
.endm
#else
.macro get_fault_ip t1 t2
loadgp
/* t1 = this_cpu_ptr(&exception_data) */
addil LT%exception_data,%r27
LDREG RT%exception_data(%r1),\t2
/* %r27 = t2->fault_gp - restore gp */
LDREG EXCDATA_GP(\t2), %r27
/* t1 = t2->fault_ip */
LDREG EXCDATA_IP(\t2), \t1
.endm


@ -151,6 +151,7 @@ int fixup_exception(struct pt_regs *regs)
struct exception_data *d;
d = this_cpu_ptr(&exception_data);
d->fault_ip = regs->iaoq[0];
d->fault_gp = regs->gr[27];
d->fault_space = regs->isr;
d->fault_addr = regs->ior;


@ -486,13 +486,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
struct hugepd_freelist **batchp;
batchp = this_cpu_ptr(&hugepd_freelist_cur);
batchp = &get_cpu_var(hugepd_freelist_cur);
if (atomic_read(&tlb->mm->mm_users) < 2 ||
cpumask_equal(mm_cpumask(tlb->mm),
cpumask_of(smp_processor_id()))) {
kmem_cache_free(hugepte_cache, hugepte);
put_cpu_var(hugepd_freelist_cur);
put_cpu_var(hugepd_freelist_cur);
return;
}


@ -41,7 +41,7 @@
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_HALT_POLL_NS_DEFAULT 400000
#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS


@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
extern bool mp_should_keep_irq(struct device *dev);
struct pci_raw_ops {
int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val);


@ -6024,12 +6024,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
}
/* try to inject new event if pending */
if (vcpu->arch.nmi_pending) {
if (kvm_x86_ops->nmi_allowed(vcpu)) {
--vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
kvm_x86_ops->set_nmi(vcpu);
}
if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
--vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
kvm_x86_ops->set_nmi(vcpu);
} else if (kvm_cpu_has_injectable_intr(vcpu)) {
/*
* Because interrupts can be injected asynchronously, we are
@ -6474,10 +6472,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (inject_pending_event(vcpu, req_int_win) != 0)
req_immediate_exit = true;
/* enable NMI/IRQ window open exits if needed */
else if (vcpu->arch.nmi_pending)
kvm_x86_ops->enable_nmi_window(vcpu);
else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
kvm_x86_ops->enable_irq_window(vcpu);
else {
if (vcpu->arch.nmi_pending)
kvm_x86_ops->enable_nmi_window(vcpu);
if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
kvm_x86_ops->enable_irq_window(vcpu);
}
if (kvm_lapic_enabled(vcpu)) {
update_cr8_intercept(vcpu);


@ -673,28 +673,22 @@ int pcibios_add_device(struct pci_dev *dev)
return 0;
}
int pcibios_alloc_irq(struct pci_dev *dev)
{
/*
* If the PCI device was already claimed by core code and has
* MSI enabled, probing of the pcibios IRQ will overwrite
* dev->irq. So bail out if MSI is already enabled.
*/
if (pci_dev_msi_enabled(dev))
return -EBUSY;
return pcibios_enable_irq(dev);
}
void pcibios_free_irq(struct pci_dev *dev)
{
if (pcibios_disable_irq)
pcibios_disable_irq(dev);
}
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
return pci_enable_resources(dev, mask);
int err;
if ((err = pci_enable_resources(dev, mask)) < 0)
return err;
if (!pci_dev_msi_enabled(dev))
return pcibios_enable_irq(dev);
return 0;
}
void pcibios_disable_device (struct pci_dev *dev)
{
if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
pcibios_disable_irq(dev);
}
int pci_ext_cfg_avail(void)


@ -215,7 +215,7 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
int polarity;
int ret;
if (pci_has_managed_irq(dev))
if (dev->irq_managed && dev->irq > 0)
return 0;
switch (intel_mid_identify_cpu()) {
@ -256,13 +256,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
static void intel_mid_pci_irq_disable(struct pci_dev *dev)
{
if (pci_has_managed_irq(dev)) {
if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
dev->irq > 0) {
mp_unmap_irq(dev->irq);
dev->irq_managed = 0;
/*
* Don't reset dev->irq here, otherwise
* intel_mid_pci_irq_enable() will fail on next call.
*/
}
}


@ -1202,7 +1202,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
struct pci_dev *temp_dev;
int irq;
if (pci_has_managed_irq(dev))
if (dev->irq_managed && dev->irq > 0)
return 0;
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
@ -1230,7 +1230,8 @@ static int pirq_enable_irq(struct pci_dev *dev)
}
dev = temp_dev;
if (irq >= 0) {
pci_set_managed_irq(dev, irq);
dev->irq_managed = 1;
dev->irq = irq;
dev_info(&dev->dev, "PCI->APIC IRQ transform: "
"INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
return 0;
@ -1256,10 +1257,24 @@ static int pirq_enable_irq(struct pci_dev *dev)
return 0;
}
bool mp_should_keep_irq(struct device *dev)
{
if (dev->power.is_prepared)
return true;
#ifdef CONFIG_PM
if (dev->power.runtime_status == RPM_SUSPENDING)
return true;
#endif
return false;
}
static void pirq_disable_irq(struct pci_dev *dev)
{
if (io_apic_assign_pci_irqs && pci_has_managed_irq(dev)) {
if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
dev->irq_managed && dev->irq) {
mp_unmap_irq(dev->irq);
pci_reset_managed_irq(dev);
dev->irq = 0;
dev->irq_managed = 0;
}
}


@ -178,6 +178,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
int cached_ret = -ENOKEY;
int ret;
*_trusted = false;
for (p = pkcs7->certs; p; p = p->next)
p->seen = false;


@ -409,7 +409,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
if (pci_has_managed_irq(dev))
if (dev->irq_managed && dev->irq > 0)
return 0;
entry = acpi_pci_irq_lookup(dev, pin);
@ -454,7 +454,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
kfree(entry);
return rc;
}
pci_set_managed_irq(dev, rc);
dev->irq = rc;
dev->irq_managed = 1;
if (link)
snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@ -477,9 +478,17 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
u8 pin;
pin = dev->pin;
if (!pin || !pci_has_managed_irq(dev))
if (!pin || !dev->irq_managed || dev->irq <= 0)
return;
/* Keep IOAPIC pin configuration when suspending */
if (dev->dev.power.is_prepared)
return;
#ifdef CONFIG_PM
if (dev->dev.power.runtime_status == RPM_SUSPENDING)
return;
#endif
entry = acpi_pci_irq_lookup(dev, pin);
if (!entry)
return;
@ -499,6 +508,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
if (gsi >= 0) {
acpi_unregister_gsi(gsi);
pci_reset_managed_irq(dev);
dev->irq_managed = 0;
}
}


@ -1955,7 +1955,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
GFP_ATOMIC);
GFP_NOIO);
if (!osd_req)
return NULL; /* ENOMEM */
@ -2004,7 +2004,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
rbd_dev = img_request->rbd_dev;
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
false, GFP_ATOMIC);
false, GFP_NOIO);
if (!osd_req)
return NULL; /* ENOMEM */
@ -2506,7 +2506,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
bio_chain_clone_range(&bio_list,
&bio_offset,
clone_size,
GFP_ATOMIC);
GFP_NOIO);
if (!obj_request->bio_list)
goto out_unwind;
} else if (type == OBJ_REQUEST_PAGES) {


@ -898,14 +898,6 @@ static int gmc_v7_0_early_init(void *handle)
gmc_v7_0_set_gart_funcs(adev);
gmc_v7_0_set_irq_funcs(adev);
if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
u32 tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK;
adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
}
return 0;
}
@ -926,6 +918,14 @@ static int gmc_v7_0_sw_init(void *handle)
if (r)
return r;
if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
u32 tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK;
adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
}
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
return r;


@ -852,14 +852,6 @@ static int gmc_v8_0_early_init(void *handle)
gmc_v8_0_set_gart_funcs(adev);
gmc_v8_0_set_irq_funcs(adev);
if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
u32 tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK;
adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
}
return 0;
}
@ -870,6 +862,8 @@ static int gmc_v8_0_late_init(void *handle)
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
#define mmMC_SEQ_MISC0_FIJI 0xA71
static int gmc_v8_0_sw_init(void *handle)
{
int r;
@ -880,6 +874,19 @@ static int gmc_v8_0_sw_init(void *handle)
if (r)
return r;
if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
u32 tmp;
if (adev->asic_type == CHIP_FIJI)
tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
else
tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK;
adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
}
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
return r;


@ -178,7 +178,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
{
struct drm_dp_aux_msg msg;
unsigned int retry;
int err;
int err = 0;
memset(&msg, 0, sizeof(msg));
msg.address = offset;
@ -186,6 +186,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
msg.buffer = buffer;
msg.size = size;
mutex_lock(&aux->hw_mutex);
/*
* The specification doesn't give any recommendation on how often to
* retry native transactions. We used to retry 7 times like for
@ -194,25 +196,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
*/
for (retry = 0; retry < 32; retry++) {
mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;
return err;
goto unlock;
}
switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
if (err < size)
return -EPROTO;
return err;
err = -EPROTO;
goto unlock;
case DP_AUX_NATIVE_REPLY_NACK:
return -EIO;
err = -EIO;
goto unlock;
case DP_AUX_NATIVE_REPLY_DEFER:
usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
@ -221,7 +222,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
}
DRM_DEBUG_KMS("too many retries, giving up\n");
return -EIO;
err = -EIO;
unlock:
mutex_unlock(&aux->hw_mutex);
return err;
}
/**
@ -543,9 +548,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
mutex_lock(&aux->hw_mutex);
ret = aux->transfer(aux, msg);
mutex_unlock(&aux->hw_mutex);
if (ret < 0) {
if (ret == -EBUSY)
continue;
@ -684,6 +687,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
memset(&msg, 0, sizeof(msg));
mutex_lock(&aux->hw_mutex);
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@ -738,6 +743,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.size = 0;
(void)drm_dp_i2c_do_msg(aux, &msg);
mutex_unlock(&aux->hw_mutex);
return err;
}


@ -2926,9 +2926,11 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
{ 0, 0, 0, 0 },
};
@ -3008,6 +3010,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
++p;
}
/* limit mclk on all R7 370 parts for stability */
if (rdev->pdev->device == 0x6811 &&
rdev->pdev->revision == 0x81)
max_mclk = 120000;
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;


@ -539,7 +539,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_gfree:
drm_gem_object_unreference(&ufbdev->ufb.obj->base);
drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
out:
return ret;
}


@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
return ret;
}
drm_gem_object_unreference(&obj->base);
drm_gem_object_unreference_unlocked(&obj->base);
*handle_p = handle;
return 0;
}


@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
return ret;
}
static void usbhid_restart_queues(struct usbhid_device *usbhid)
{
if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
usbhid_restart_out_queue(usbhid);
if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
usbhid_restart_ctrl_queue(usbhid);
}
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
usb_kill_urb(usbhid->urbout);
}
static void hid_restart_io(struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
spin_lock_irq(&usbhid->lock);
clear_bit(HID_SUSPENDED, &usbhid->iofl);
usbhid_mark_busy(usbhid);
if (clear_halt || reset_pending)
schedule_work(&usbhid->reset_work);
usbhid->retry_delay = 0;
spin_unlock_irq(&usbhid->lock);
if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
return;
if (!clear_halt) {
if (hid_start_in(hid) < 0)
hid_io_error(hid);
}
spin_lock_irq(&usbhid->lock);
if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
usbhid_restart_out_queue(usbhid);
if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
usbhid_restart_ctrl_queue(usbhid);
spin_unlock_irq(&usbhid->lock);
}
/* Treat USB reset pretty much the same as suspend/resume */
static int hid_pre_reset(struct usb_interface *intf)
{
@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
return 1;
}
/* No need to do another reset or clear a halted endpoint */
spin_lock_irq(&usbhid->lock);
clear_bit(HID_RESET_PENDING, &usbhid->iofl);
clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
status = hid_start_in(hid);
if (status < 0)
hid_io_error(hid);
usbhid_restart_queues(usbhid);
hid_restart_io(hid);
return 0;
}
@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
#ifdef CONFIG_PM
static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
{
struct usbhid_device *usbhid = hid->driver_data;
int status;
spin_lock_irq(&usbhid->lock);
clear_bit(HID_SUSPENDED, &usbhid->iofl);
usbhid_mark_busy(usbhid);
if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
test_bit(HID_RESET_PENDING, &usbhid->iofl))
schedule_work(&usbhid->reset_work);
usbhid->retry_delay = 0;
usbhid_restart_queues(usbhid);
spin_unlock_irq(&usbhid->lock);
status = hid_start_in(hid);
if (status < 0)
hid_io_error(hid);
int status = 0;
hid_restart_io(hid);
if (driver_suspended && hid->driver && hid->driver->resume)
status = hid->driver->resume(hid);
return status;
@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
static int hid_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata (intf);
struct usbhid_device *usbhid = hid->driver_data;
int status;
if (!test_bit(HID_STARTED, &usbhid->iofl))
return 0;
status = hid_resume_common(hid, true);
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
static int hid_reset_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata(intf);
struct usbhid_device *usbhid = hid->driver_data;
int status;
clear_bit(HID_SUSPENDED, &usbhid->iofl);
status = hid_post_reset(intf);
if (status >= 0 && hid->driver && hid->driver->reset_resume) {
int ret = hid->driver->reset_resume(hid);


@ -2492,6 +2492,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
}
}
/*
* Hack for the Bamboo One:
* the device presents a PAD/Touch interface as most Bamboos and even
* sends ghosts PAD data on it. However, later, we must disable this
* ghost interface, and we can not detect it unless we set it here
* to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
*/
if (features->type == BAMBOO_PEN &&
features->pktlen == WACOM_PKGLEN_BBTOUCH3)
features->device_type |= WACOM_DEVICETYPE_PAD;
/*
* Raw Wacom-mode pen and touch events both come from interface
* 0, whose HID descriptor has an application usage of 0xFF0D


@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
int max1111_read_channel(int channel)
{
if (!the_max1111 || !the_max1111->spi)
return -ENODEV;
return max1111_read(&the_max1111->spi->dev, channel);
}
EXPORT_SYMBOL(max1111_read_channel);
@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
{
struct max1111_data *data = spi_get_drvdata(spi);
#ifdef CONFIG_SHARPSL_PM
the_max1111 = NULL;
#endif
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);


@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
{
int ret;
int axis = chan->scan_index;
unsigned int raw_val;
__le16 raw_val;
mutex_lock(&data->mutex);
ret = bmc150_accel_set_power_state(data, true);
@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
}
ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
&raw_val, 2);
&raw_val, sizeof(raw_val));
if (ret < 0) {
dev_err(data->dev, "Error reading axis %d\n", axis);
bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
}
*val = sign_extend32(raw_val >> chan->scan_type.shift,
*val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
chan->scan_type.realbits - 1);
ret = bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = {
.realbits = (bits), \
.storagebits = 16, \
.shift = 16 - (bits), \
.endianness = IIO_LE, \
}, \
.event_spec = &bmc150_accel_event, \
.num_event_specs = 1 \


@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
{
int ret;
unsigned int raw_val;
__le16 raw_val;
mutex_lock(&data->mutex);
ret = bmg160_set_power_state(data, true);
@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
}
ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
2);
sizeof(raw_val));
if (ret < 0) {
dev_err(data->dev, "Error reading axis %d\n", axis);
bmg160_set_power_state(data, false);
@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
return ret;
}
*val = sign_extend32(raw_val, 15);
*val = sign_extend32(le16_to_cpu(raw_val), 15);
ret = bmg160_set_power_state(data, false);
mutex_unlock(&data->mutex);
if (ret < 0)
@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = {
.sign = 's', \
.realbits = 16, \
.storagebits = 16, \
.endianness = IIO_LE, \
}, \
.event_spec = &bmg160_event, \
.num_event_specs = 1 \
@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
mutex_unlock(&data->mutex);
goto err;
}
data->buffer[i++] = ret;
data->buffer[i++] = val;
}
mutex_unlock(&data->mutex);


@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
{
}
#define ST_MAGN_TRIGGER_SET_STATE NULL
#endif /* CONFIG_IIO_BUFFER */
#endif /* ST_MAGN_H */


@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
if (!group->default_domain) {
group->default_domain = __iommu_domain_alloc(dev->bus,
IOMMU_DOMAIN_DMA);
group->domain = group->default_domain;
if (!group->domain)
group->domain = group->default_domain;
}
ret = iommu_group_add_device(group, dev);


@ -2119,14 +2119,12 @@ static int coda_probe(struct platform_device *pdev)
pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
if (of_id) {
if (of_id)
dev->devtype = of_id->data;
} else if (pdev_id) {
else if (pdev_id)
dev->devtype = &coda_devdata[pdev_id->driver_data];
} else {
ret = -EINVAL;
goto err_v4l2_register;
}
else
return -EINVAL;
spin_lock_init(&dev->irqlock);
INIT_LIST_HEAD(&dev->instances);


@ -154,6 +154,7 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
mutex_lock(sru->ctrls.lock);
ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0)
& (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK);
vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0);
mutex_unlock(sru->ctrls.lock);
vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);


@ -159,7 +159,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
Set the status so poll routines can check and avoid
access after disconnect.
*/
dev->dev_state = DEV_DISCONNECTED;
set_bit(DEV_DISCONNECTED, &dev->dev_state);
au0828_rc_unregister(dev);
/* Digital TV */


@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
bool first = true;
/* do nothing if device is disconnected */
if (ir->dev->dev_state == DEV_DISCONNECTED)
if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
return 0;
/* Check IR int */
@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
/* do nothing if device is disconnected */
if (ir->dev->dev_state != DEV_DISCONNECTED) {
if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
/* Disable IR */
au8522_rc_clear(ir, 0xe0, 1 << 4);
}


@ -104,14 +104,13 @@ static inline void print_err_status(struct au0828_dev *dev,
static int check_dev(struct au0828_dev *dev)
{
if (dev->dev_state & DEV_DISCONNECTED) {
if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
pr_info("v4l2 ioctl: device not present\n");
return -ENODEV;
}
if (dev->dev_state & DEV_MISCONFIGURED) {
pr_info("v4l2 ioctl: device is misconfigured; "
"close and open it again\n");
if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
return -EIO;
}
return 0;
@ -519,8 +518,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
if (!dev)
return 0;
if ((dev->dev_state & DEV_DISCONNECTED) ||
(dev->dev_state & DEV_MISCONFIGURED))
if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
test_bit(DEV_MISCONFIGURED, &dev->dev_state))
return 0;
if (urb->status < 0) {
@ -766,10 +765,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
int ret = 0;
dev->stream_state = STREAM_INTERRUPT;
if (dev->dev_state == DEV_DISCONNECTED)
if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
return -ENODEV;
else if (ret) {
dev->dev_state = DEV_MISCONFIGURED;
set_bit(DEV_MISCONFIGURED, &dev->dev_state);
dprintk(1, "%s device is misconfigured!\n", __func__);
return ret;
}
@ -958,7 +957,7 @@ static int au0828_v4l2_open(struct file *filp)
int ret;
dprintk(1,
"%s called std_set %d dev_state %d stream users %d users %d\n",
"%s called std_set %d dev_state %ld stream users %d users %d\n",
__func__, dev->std_set_in_tuner_core, dev->dev_state,
dev->streaming_users, dev->users);
@ -977,7 +976,7 @@ static int au0828_v4l2_open(struct file *filp)
au0828_analog_stream_enable(dev);
au0828_analog_stream_reset(dev);
dev->stream_state = STREAM_OFF;
dev->dev_state |= DEV_INITIALIZED;
set_bit(DEV_INITIALIZED, &dev->dev_state);
}
dev->users++;
mutex_unlock(&dev->lock);
@ -991,7 +990,7 @@ static int au0828_v4l2_close(struct file *filp)
struct video_device *vdev = video_devdata(filp);
dprintk(1,
"%s called std_set %d dev_state %d stream users %d users %d\n",
"%s called std_set %d dev_state %ld stream users %d users %d\n",
__func__, dev->std_set_in_tuner_core, dev->dev_state,
dev->streaming_users, dev->users);
@ -1007,7 +1006,7 @@ static int au0828_v4l2_close(struct file *filp)
del_timer_sync(&dev->vbi_timeout);
}
if (dev->dev_state == DEV_DISCONNECTED)
if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
goto end;
if (dev->users == 1) {
@ -1036,7 +1035,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
.type = V4L2_TUNER_ANALOG_TV,
};
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
if (dev->std_set_in_tuner_core)
@ -1108,7 +1107,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
struct au0828_dev *dev = video_drvdata(file);
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
strlcpy(cap->driver, "au0828", sizeof(cap->driver));
@ -1151,7 +1150,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
f->fmt.pix.width = dev->width;
@ -1170,7 +1169,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
@ -1182,7 +1181,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct au0828_dev *dev = video_drvdata(file);
int rc;
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
rc = check_dev(dev);
@ -1204,7 +1203,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
{
struct au0828_dev *dev = video_drvdata(file);
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
if (norm == dev->std)
@ -1236,7 +1235,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
struct au0828_dev *dev = video_drvdata(file);
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
*norm = dev->std;
@ -1259,7 +1258,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
[AU0828_VMUX_DEBUG] = "tv debug"
};
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
tmp = input->index;
@ -1289,7 +1288,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct au0828_dev *dev = video_drvdata(file);
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
*i = dev->ctrl_input;
@ -1300,7 +1299,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
{
int i;
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
switch (AUVI_INPUT(index).type) {
@ -1385,7 +1384,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
{
struct au0828_dev *dev = video_drvdata(file);
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
a->index = dev->ctrl_ainput;
@ -1405,7 +1404,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
if (a->index != dev->ctrl_ainput)
return -EINVAL;
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return 0;
}
@ -1417,7 +1416,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
if (t->index != 0)
return -EINVAL;
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
strcpy(t->name, "Auvitek tuner");
@ -1437,7 +1436,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
if (t->index != 0)
return -EINVAL;
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
au0828_init_tuner(dev);
@ -1459,7 +1458,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
if (freq->tuner != 0)
return -EINVAL;
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
freq->frequency = dev->ctrl_freq;
return 0;
@ -1474,7 +1473,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
if (freq->tuner != 0)
return -EINVAL;
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
au0828_init_tuner(dev);
@ -1500,7 +1499,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
format->fmt.vbi.samples_per_line = dev->vbi_width;
@ -1526,7 +1525,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
cc->bounds.left = 0;
@ -1548,7 +1547,7 @@ static int vidioc_g_register(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
reg->val = au0828_read(dev, reg->reg);
@ -1561,7 +1560,7 @@ static int vidioc_s_register(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return au0828_writereg(dev, reg->reg, reg->val);


@ -21,6 +21,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@ -122,9 +123,9 @@ enum au0828_stream_state {
/* device state */
enum au0828_dev_state {
DEV_INITIALIZED = 0x01,
DEV_DISCONNECTED = 0x02,
DEV_MISCONFIGURED = 0x04
DEV_INITIALIZED = 0,
DEV_DISCONNECTED = 1,
DEV_MISCONFIGURED = 2
};
struct au0828_dev;
@ -248,7 +249,7 @@ struct au0828_dev {
int input_type;
int std_set_in_tuner_core;
unsigned int ctrl_input;
enum au0828_dev_state dev_state;
long unsigned int dev_state; /* defined at enum au0828_dev_state */;
enum au0828_stream_state stream_state;
wait_queue_head_t open;


@ -1463,9 +1463,23 @@ static int usbvision_probe(struct usb_interface *intf,
if (usbvision_device_data[model].interface >= 0)
interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
else
else if (ifnum < dev->actconfig->desc.bNumInterfaces)
interface = &dev->actconfig->interface[ifnum]->altsetting[0];
else {
dev_err(&intf->dev, "interface %d is invalid, max is %d\n",
ifnum, dev->actconfig->desc.bNumInterfaces - 1);
ret = -ENODEV;
goto err_usb;
}
if (interface->desc.bNumEndpoints < 2) {
dev_err(&intf->dev, "interface %d has %d endpoints, but must"
" have minimum 2\n", ifnum, interface->desc.bNumEndpoints);
ret = -ENODEV;
goto err_usb;
}
endpoint = &interface->endpoint[1].desc;
if (!usb_endpoint_xfer_isoc(endpoint)) {
dev_err(&intf->dev, "%s: interface %d. has non-ISO endpoint!\n",
__func__, ifnum);


@ -390,6 +390,7 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
slot->cd_idx = 0;
slot->cd_override_level = true;
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
slot->host->mmc_host_ops.get_cd = bxt_get_cd;
@ -1171,6 +1172,30 @@ static const struct pci_device_id pci_ids[] = {
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
},
{
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_BXTM_EMMC,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
},
{
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_BXTM_SDIO,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
},
{
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_BXTM_SD,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
},
{
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_APL_EMMC,


@ -28,6 +28,9 @@
#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
#define PCI_DEVICE_ID_INTEL_BXTM_SD 0x1aca
#define PCI_DEVICE_ID_INTEL_BXTM_EMMC 0x1acc
#define PCI_DEVICE_ID_INTEL_BXTM_SDIO 0x1ad0
#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0


@ -3260,6 +3260,30 @@ static int bond_close(struct net_device *bond_dev)
return 0;
}
/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
* that some drivers can provide 32bit values only.
*/
static void bond_fold_stats(struct rtnl_link_stats64 *_res,
const struct rtnl_link_stats64 *_new,
const struct rtnl_link_stats64 *_old)
{
const u64 *new = (const u64 *)_new;
const u64 *old = (const u64 *)_old;
u64 *res = (u64 *)_res;
int i;
for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
u64 nv = new[i];
u64 ov = old[i];
/* detects if this particular field is 32bit only */
if (((nv | ov) >> 32) == 0)
res[i] += (u32)nv - (u32)ov;
else
res[i] += nv - ov;
}
}
static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{
@ -3268,43 +3292,23 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct list_head *iter;
struct slave *slave;
spin_lock(&bond->stats_lock);
memcpy(stats, &bond->bond_stats, sizeof(*stats));
bond_for_each_slave(bond, slave, iter) {
const struct rtnl_link_stats64 *sstats =
rcu_read_lock();
bond_for_each_slave_rcu(bond, slave, iter) {
const struct rtnl_link_stats64 *new =
dev_get_stats(slave->dev, &temp);
struct rtnl_link_stats64 *pstats = &slave->slave_stats;
stats->rx_packets += sstats->rx_packets - pstats->rx_packets;
stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
stats->tx_packets += sstats->tx_packets - pstats->tx_packets;;
stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
stats->tx_errors += sstats->tx_errors - pstats->tx_errors;
stats->tx_dropped += sstats->tx_dropped - pstats->tx_dropped;
stats->multicast += sstats->multicast - pstats->multicast;
stats->collisions += sstats->collisions - pstats->collisions;
stats->rx_length_errors += sstats->rx_length_errors - pstats->rx_length_errors;
stats->rx_over_errors += sstats->rx_over_errors - pstats->rx_over_errors;
stats->rx_crc_errors += sstats->rx_crc_errors - pstats->rx_crc_errors;
stats->rx_frame_errors += sstats->rx_frame_errors - pstats->rx_frame_errors;
stats->rx_fifo_errors += sstats->rx_fifo_errors - pstats->rx_fifo_errors;
stats->rx_missed_errors += sstats->rx_missed_errors - pstats->rx_missed_errors;
stats->tx_aborted_errors += sstats->tx_aborted_errors - pstats->tx_aborted_errors;
stats->tx_carrier_errors += sstats->tx_carrier_errors - pstats->tx_carrier_errors;
stats->tx_fifo_errors += sstats->tx_fifo_errors - pstats->tx_fifo_errors;
stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors - pstats->tx_heartbeat_errors;
stats->tx_window_errors += sstats->tx_window_errors - pstats->tx_window_errors;
bond_fold_stats(stats, new, &slave->slave_stats);
/* save off the slave stats for the next run */
memcpy(pstats, sstats, sizeof(*sstats));
memcpy(&slave->slave_stats, new, sizeof(*new));
}
rcu_read_unlock();
memcpy(&bond->bond_stats, stats, sizeof(*stats));
spin_unlock(&bond->stats_lock);
return stats;
}
@ -4118,6 +4122,7 @@ void bond_setup(struct net_device *bond_dev)
struct bonding *bond = netdev_priv(bond_dev);
spin_lock_init(&bond->mode_lock);
spin_lock_init(&bond->stats_lock);
bond->params = bonding_defaults;
/* Initialize pointers */


@ -1197,7 +1197,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
dev->stats.tx_bytes += tx_cb_ptr->skb->len;
dma_unmap_single(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
tx_cb_ptr->skb->len,
dma_unmap_len(tx_cb_ptr, dma_len),
DMA_TO_DEVICE);
bcmgenet_free_cb(tx_cb_ptr);
} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
@ -1308,7 +1308,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
}
dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
DMA_TX_APPEND_CRC;


@ -3312,13 +3312,14 @@ jme_resume(struct device *dev)
jme_reset_phy_processor(jme);
jme_phy_calibration(jme);
jme_phy_setEA(jme);
jme_start_irq(jme);
netif_device_attach(netdev);
atomic_inc(&jme->link_changing);
jme_reset_link(jme);
jme_start_irq(jme);
return 0;
}


@ -3132,7 +3132,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
case QP_TRANS_RTS2RTS:
case QP_TRANS_SQD2SQD:
case QP_TRANS_SQD2RTS:
if (slave != mlx4_master_func_num(dev))
if (slave != mlx4_master_func_num(dev)) {
if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
@ -3151,6 +3151,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
if (qp_ctx->alt_path.mgid_index >= num_gids)
return -EINVAL;
}
}
break;
default:
break;


@ -61,6 +61,8 @@ struct mlxsw_sp {
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
unsigned int interval; /* ms */
} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time;
struct {


@ -232,8 +232,13 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
if (switchdev_trans_ph_prepare(trans))
return 0;
if (switchdev_trans_ph_prepare(trans)) {
if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
ageing_time > MLXSW_SP_MAX_AGEING_TIME)
return -ERANGE;
else
return 0;
}
return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}


@ -566,6 +566,7 @@ struct qlcnic_adapter_stats {
u64 tx_dma_map_error;
u64 spurious_intr;
u64 mac_filter_limit_overrun;
u64 mbx_spurious_intr;
};
/*
@ -1099,7 +1100,7 @@ struct qlcnic_mailbox {
unsigned long status;
spinlock_t queue_lock; /* Mailbox queue lock */
spinlock_t aen_lock; /* Mailbox response/AEN lock */
atomic_t rsp_status;
u32 rsp_status;
u32 num_cmds;
};


@ -491,7 +491,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
{
atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
complete(&mbx->completion);
}
@ -510,7 +510,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
} else {
if (atomic_read(&mbx->rsp_status) != rsp_status)
if (mbx->rsp_status != rsp_status)
qlcnic_83xx_notify_mbx_response(mbx);
}
out:
@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
} else {
if (atomic_read(&mbx->rsp_status) != rsp_status)
if (mbx->rsp_status != rsp_status)
qlcnic_83xx_notify_mbx_response(mbx);
}
}
@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
{
u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
struct qlcnic_adapter *adapter = data;
struct qlcnic_mailbox *mbx;
u32 mask, resp, event;
unsigned long flags;
mbx = adapter->ahw->mailbox;
@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
goto out;
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
if (event & QLCNIC_MBX_ASYNC_EVENT)
if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
else
qlcnic_83xx_notify_mbx_response(mbx);
} else {
if (mbx->rsp_status != rsp_status)
qlcnic_83xx_notify_mbx_response(mbx);
else
adapter->stats.mbx_spurious_intr++;
}
out:
mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
@ -4050,10 +4054,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
struct qlcnic_adapter *adapter = mbx->adapter;
const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
struct device *dev = &adapter->pdev->dev;
atomic_t *rsp_status = &mbx->rsp_status;
struct list_head *head = &mbx->cmd_q;
struct qlcnic_hardware_context *ahw;
struct qlcnic_cmd_args *cmd = NULL;
unsigned long flags;
ahw = adapter->ahw;
@ -4063,7 +4067,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
return;
}
atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
spin_lock_irqsave(&mbx->aen_lock, flags);
mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
spin_unlock_irqrestore(&mbx->aen_lock, flags);
spin_lock(&mbx->queue_lock);


@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
QLC_OFF(stats.mac_filter_limit_overrun)},
{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
QLC_OFF(stats.spurious_intr)},
{"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
QLC_OFF(stats.mbx_spurious_intr)},
};
static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {


@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
return;
}
skb_reserve(new_skb, NET_IP_ALIGN);
pci_dma_sync_single_for_cpu(qdev->pdev,
dma_unmap_addr(sbq_desc, mapaddr),
dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
memcpy(skb_put(new_skb, length), skb->data, length);
pci_dma_sync_single_for_device(qdev->pdev,
dma_unmap_addr(sbq_desc, mapaddr),
dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb = new_skb;
/* Frame error, so drop the packet. */


@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev)
dev->netdev_ops = &qcaspi_netdev_ops;
qcaspi_set_ethtool_ops(dev);
dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
dev->flags = IFF_MULTICAST;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->tx_queue_len = 100;
qca = netdev_priv(dev);


@ -1185,11 +1185,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
break;
sh_eth_set_receive_align(skb);
/* RX descriptor */
rxdesc = &mdp->rx_ring[i];
/* The size of the buffer is a multiple of 32 bytes. */
buf_len = ALIGN(mdp->rx_buf_sz, 32);
rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, dma_addr)) {
@ -1197,6 +1194,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
break;
}
mdp->rx_skbuff[i] = skb;
/* RX descriptor */
rxdesc = &mdp->rx_ring[i];
rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
@ -1212,7 +1213,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
/* Mark the last entry as wrapping the ring. */
rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);
if (rxdesc)
rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);
memset(mdp->tx_ring, 0, tx_ringsize);


@ -239,6 +239,7 @@ struct rocker {
struct {
u64 id;
} hw;
unsigned long ageing_time;
spinlock_t cmd_ring_lock; /* for cmd ring accesses */
struct rocker_dma_ring_info cmd_ring;
struct rocker_dma_ring_info event_ring;
@ -3704,7 +3705,7 @@ static void rocker_fdb_cleanup(unsigned long data)
struct rocker_port *rocker_port;
struct rocker_fdb_tbl_entry *entry;
struct hlist_node *tmp;
unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
unsigned long next_timer = jiffies + rocker->ageing_time;
unsigned long expires;
unsigned long lock_flags;
int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
@ -4367,8 +4368,12 @@ static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
struct switchdev_trans *trans,
u32 ageing_time)
{
struct rocker *rocker = rocker_port->rocker;
if (!switchdev_trans_ph_prepare(trans)) {
rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
if (rocker_port->ageing_time < rocker->ageing_time)
rocker->ageing_time = rocker_port->ageing_time;
mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
}
@ -5206,10 +5211,13 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init_tbls;
}
rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
(unsigned long) rocker);
mod_timer(&rocker->fdb_cleanup_timer, jiffies);
rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
err = rocker_probe_ports(rocker);
if (err) {
dev_err(&pdev->dev, "failed to probe ports\n");


@ -760,6 +760,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
if (copylen > good_linear)
copylen = good_linear;
else if (copylen < ETH_HLEN)
copylen = ETH_HLEN;
linear = copylen;
i = *from;
iov_iter_advance(&i, copylen);
@ -769,10 +771,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (!zerocopy) {
copylen = len;
if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
if (linear > good_linear)
linear = good_linear;
else
linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
else if (linear < ETH_HLEN)
linear = ETH_HLEN;
}
skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,


@ -567,7 +567,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ppp_file *pf = file->private_data;
struct ppp_file *pf;
struct ppp *ppp;
int err = -EFAULT, val, val2, i;
struct ppp_idle idle;
@ -577,9 +577,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
void __user *argp = (void __user *)arg;
int __user *p = argp;
if (!pf)
return ppp_unattached_ioctl(current->nsproxy->net_ns,
pf, file, cmd, arg);
mutex_lock(&ppp_mutex);
pf = file->private_data;
if (!pf) {
err = ppp_unattached_ioctl(current->nsproxy->net_ns,
pf, file, cmd, arg);
goto out;
}
if (cmd == PPPIOCDETACH) {
/*
@ -594,7 +599,6 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
* this fd and reopening /dev/ppp.
*/
err = -EINVAL;
mutex_lock(&ppp_mutex);
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
rtnl_lock();
@ -608,15 +612,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} else
pr_warn("PPPIOCDETACH file->f_count=%ld\n",
atomic_long_read(&file->f_count));
mutex_unlock(&ppp_mutex);
return err;
goto out;
}
if (pf->kind == CHANNEL) {
struct channel *pch;
struct ppp_channel *chan;
mutex_lock(&ppp_mutex);
pch = PF_TO_CHANNEL(pf);
switch (cmd) {
@ -638,17 +640,16 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = chan->ops->ioctl(chan, cmd, arg);
up_read(&pch->chan_sem);
}
mutex_unlock(&ppp_mutex);
return err;
goto out;
}
if (pf->kind != INTERFACE) {
/* can't happen */
pr_err("PPP: not interface or channel??\n");
return -EINVAL;
err = -EINVAL;
goto out;
}
mutex_lock(&ppp_mutex);
ppp = PF_TO_PPP(pf);
switch (cmd) {
case PPPIOCSMRU:
@ -823,7 +824,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
default:
err = -ENOTTY;
}
out:
mutex_unlock(&ppp_mutex);
return err;
}
@ -836,7 +840,6 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct ppp_net *pn;
int __user *p = (int __user *)arg;
mutex_lock(&ppp_mutex);
switch (cmd) {
case PPPIOCNEWUNIT:
/* Create a new ppp unit */
@ -886,7 +889,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
default:
err = -ENOTTY;
}
mutex_unlock(&ppp_mutex);
return err;
}
@ -2290,7 +2293,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
pch->ppp = NULL;
pch->chan = chan;
pch->chan_net = net;
pch->chan_net = get_net(net);
chan->ppp = pch;
init_ppp_file(&pch->file, CHANNEL);
pch->file.hdrlen = chan->hdrlen;
@ -2387,6 +2390,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
put_net(pch->chan_net);
pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
@ -2803,6 +2808,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
out2:
mutex_unlock(&pn->all_ppp_mutex);
rtnl_unlock();
free_netdev(dev);
out1:
*retp = ret;
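The ppp_ioctl() rework takes ppp_mutex once near the top and funnels every return through a single out: label instead of locking and unlocking in each branch, which also lets the unattached case run under the same lock. A small user-space analog of that goto-out locking pattern, using a pthread mutex in place of ppp_mutex and made-up command numbers:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ioctl_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every branch ends with "goto out", so the unlock appears exactly once. */
static int demo_ioctl(int attached, int cmd)
{
	int err;

	pthread_mutex_lock(&ioctl_lock);
	if (!attached) {
		err = -ENXIO;		/* e.g. defer to an "unattached" helper */
		goto out;
	}
	switch (cmd) {
	case 1:
		err = 0;		/* handled */
		break;
	default:
		err = -ENOTTY;
	}
out:
	pthread_mutex_unlock(&ioctl_lock);
	return err;
}

int main(void)
{
	printf("%d %d %d\n", demo_ioctl(0, 1), demo_ioctl(1, 1), demo_ioctl(1, 99));
	return 0;			/* prints -6 0 -25 on Linux */
}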

View file

@ -621,7 +621,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
/* Re-attach the filter to persist device */
if (!skip_filter && (tun->filter_attached == true)) {
err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
lockdep_rtnl_is_held());
if (!err)
goto out;
}
@ -1000,7 +1001,6 @@ static void tun_net_init(struct net_device *dev)
/* Zero header length */
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
case IFF_TAP:
@ -1012,7 +1012,6 @@ static void tun_net_init(struct net_device *dev)
eth_hw_addr_random(dev);
dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
}
}
@ -1463,6 +1462,8 @@ static void tun_setup(struct net_device *dev)
dev->ethtool_ops = &tun_ethtool_ops;
dev->destructor = tun_free_netdev;
/* We prefer our own queue length */
dev->tx_queue_len = TUN_READQ_SIZE;
}
/* Trivial set of netlink ops to allow deleting tun or tap
@ -1804,7 +1805,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
sk_detach_filter(tfile->socket.sk);
__sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
}
tun->filter_attached = false;
@ -1817,7 +1818,8 @@ static int tun_attach_filter(struct tun_struct *tun)
for (i = 0; i < tun->numqueues; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
lockdep_rtnl_is_held());
if (ret) {
tun_detach_filter(tun, i);
return ret;

View file

@ -160,6 +160,12 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
info->u = header.usb_cdc_union_desc;
info->header = header.usb_cdc_header_desc;
info->ether = header.usb_cdc_ether_desc;
if (!info->u) {
if (rndis)
goto skip;
else /* in that case a quirk is mandatory */
goto bad_desc;
}
/* we need a master/control interface (what we're
* probed with) and a slave/data interface; union
* descriptors sort this all out.
@ -256,7 +262,7 @@ skip:
goto bad_desc;
}
} else if (!info->header || !info->u || (!rndis && !info->ether)) {
} else if (!info->header || (!rndis && !info->ether)) {
dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n",
info->header ? "" : "header ",
info->u ? "" : "union ",

View file

@ -794,7 +794,11 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
/* reset data interface */
/* Reset data interface. Some devices will not reset properly
* unless they are configured first. Toggle the altsetting to
* force a reset
*/
usb_set_interface(dev->udev, iface_no, data_altsetting);
temp = usb_set_interface(dev->udev, iface_no, 0);
if (temp) {
dev_dbg(&intf->dev, "set interface failed\n");

View file

@ -699,6 +699,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
@ -718,8 +719,10 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9070, 8)}, /* Sierra Wireless MC74xx/EM74xx */
{QMI_FIXED_INTF(0x1199, 0x9070, 10)}, /* Sierra Wireless MC74xx/EM74xx */
{QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */
{QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */
{QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
{QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
{QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */

View file

@ -1766,6 +1766,13 @@ out3:
if (info->unbind)
info->unbind (dev, udev);
out1:
/* subdrivers must undo all they did in bind() if they
* fail it, but we may fail later and a deferred kevent
* may trigger an error resubmitting itself and, worse,
* schedule a timer. So we kill it all just in case.
*/
cancel_work_sync(&dev->kevent);
del_timer_sync(&dev->delay);
free_netdev(net);
out:
return status;

View file

@ -114,20 +114,23 @@ static struct dst_ops vrf_dst_ops = {
#if IS_ENABLED(CONFIG_IPV6)
static bool check_ipv6_frame(const struct sk_buff *skb)
{
const struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data;
size_t hlen = sizeof(*ipv6h);
const struct ipv6hdr *ipv6h;
struct ipv6hdr _ipv6h;
bool rc = true;
if (skb->len < hlen)
ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
if (!ipv6h)
goto out;
if (ipv6h->nexthdr == NEXTHDR_ICMP) {
const struct icmp6hdr *icmph;
struct icmp6hdr _icmph;
if (skb->len < hlen + sizeof(*icmph))
icmph = skb_header_pointer(skb, sizeof(_ipv6h),
sizeof(_icmph), &_icmph);
if (!icmph)
goto out;
icmph = (struct icmp6hdr *)(skb->data + sizeof(*ipv6h));
switch (icmph->icmp6_type) {
case NDISC_ROUTER_SOLICITATION:
case NDISC_ROUTER_ADVERTISEMENT:

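check_ipv6_frame() now pulls the IPv6 and ICMPv6 headers through skb_header_pointer() into on-stack copies instead of dereferencing skb->data directly, so short or non-linear frames are rejected rather than read past the end of the buffer. A minimal user-space model of that check-before-read pattern; header_pointer() below is a plain memcpy stand-in for skb_header_pointer():

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Return a pointer to 'len' bytes at 'offset', copied into 'buf';
 * NULL if the packet is too short, in which case the caller bails out. */
static const void *header_pointer(const unsigned char *pkt, size_t pkt_len,
				  size_t offset, size_t len, void *buf)
{
	if (offset + len > pkt_len)
		return NULL;
	memcpy(buf, pkt + offset, len);
	return buf;
}

int main(void)
{
	unsigned char pkt[40] = { 0 };		/* bare IPv6 header, no ICMPv6 */
	unsigned char v6hdr[40], icmp[8];

	printf("ipv6: %s\n", header_pointer(pkt, sizeof(pkt), 0, 40, v6hdr) ? "ok" : "short");
	printf("icmp: %s\n", header_pointer(pkt, sizeof(pkt), 40, 8, icmp) ? "ok" : "short");
	return 0;	/* prints "ipv6: ok" then "icmp: short" */
}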
View file

@ -1306,8 +1306,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
gbp = (struct vxlanhdr_gbp *)vxh;
md->gbp = ntohs(gbp->policy_id);
if (tun_dst)
if (tun_dst) {
tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
tun_dst->u.tun_info.options_len = sizeof(*md);
}
if (gbp->dont_learn)
md->gbp |= VXLAN_GBP_DONT_LEARN;

View file

@ -2516,7 +2516,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->mem_start = card->phys_mem
+ BUF_OFFSET ( txBuffer[i][0][0]);
dev->mem_end = card->phys_mem
+ BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
+ BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
dev->base_addr = card->pci_conf;
dev->irq = card->irq;

View file

@ -403,10 +403,9 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
if (match) {
if (AR_SREV_9287(ah)) {
/* FIXME: array overrun? */
for (i = 0; i < numXpdGains; i++) {
minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
data_9287[idxL].pwrPdg[i],
data_9287[idxL].vpdPdg[i],
@ -416,7 +415,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
} else if (eeprom_4k) {
for (i = 0; i < numXpdGains; i++) {
minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
data_4k[idxL].pwrPdg[i],
data_4k[idxL].vpdPdg[i],
@ -426,7 +425,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
} else {
for (i = 0; i < numXpdGains; i++) {
minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
data_def[idxL].pwrPdg[i],
data_def[idxL].vpdPdg[i],

View file

@ -335,7 +335,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
[ND_CMD_IMPLEMENTED] = { },
[ND_CMD_SMART] = {
.out_num = 2,
.out_sizes = { 4, 8, },
.out_sizes = { 4, 128, },
},
[ND_CMD_SMART_THRESHOLD] = {
.out_num = 2,

View file

@ -275,7 +275,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
} else {
/* from init we validate */
if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
return -EINVAL;
return -ENODEV;
}
/*

View file

@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
int stschg_irq; /* card-status-change irq */
int card_irq; /* card irq */
int eject_irq; /* db1200/pb1200 have these */
int insert_gpio; /* db1000 carddetect gpio */
#define BOARD_TYPE_DEFAULT 0 /* most boards */
#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */
@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
/* carddetect gpio: low-active */
static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
{
return !gpio_get_value(irq_to_gpio(sock->insert_irq));
return !gpio_get_value(sock->insert_gpio);
}
static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
sock->card_irq = r ? r->start : 0;
/* insert: irq which triggers on card insertion/ejection */
/* insert: irq which triggers on card insertion/ejection
* BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
*/
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
sock->insert_irq = r ? r->start : -1;
if (sock->board_type == BOARD_TYPE_DEFAULT) {
sock->insert_gpio = r ? r->start : -1;
sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
}
/* stschg: irq which trigger on card status change (optional) */
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");

View file

@ -726,19 +726,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
if (of_property_read_bool(dev_np, "fsl,input-sel")) {
np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
if (np) {
ipctl->input_sel_base = of_iomap(np, 0);
if (IS_ERR(ipctl->input_sel_base)) {
of_node_put(np);
dev_err(&pdev->dev,
"iomuxc input select base address not found\n");
return PTR_ERR(ipctl->input_sel_base);
}
} else {
if (!np) {
dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
return -EINVAL;
}
ipctl->input_sel_base = of_iomap(np, 0);
of_node_put(np);
if (!ipctl->input_sel_base) {
dev_err(&pdev->dev,
"iomuxc input select base address not found\n");
return -ENOMEM;
}
}
imx_pinctrl_desc.name = dev_name(&pdev->dev);
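The pinctrl-imx hunk reorders the fsl,input-sel handling so the missing-phandle case is tested first, and fixes the failure check on of_iomap(): that call returns NULL on failure, not an ERR_PTR, so the old IS_ERR() test could never trigger. A small user-space illustration of why IS_ERR() cannot catch a NULL return; the two macros are simplified from the kernel's definitions:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *null_style = NULL;		/* how of_iomap() reports failure */
	void *errptr_style = ERR_PTR(-ENOMEM);	/* ERR_PTR-style failure */

	printf("IS_ERR(NULL)      = %d  (why the old check never fired)\n", IS_ERR(null_style));
	printf("IS_ERR(ERR_PTR()) = %d\n", IS_ERR(errptr_style));
	printf("NULL test         = %d  (what the fixed code checks)\n", null_style == NULL);
	return 0;
}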

View file

@ -995,7 +995,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
int val;
if (pull)
pullidx = data_out ? 1 : 2;
pullidx = data_out ? 2 : 1;
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
gpio,

View file

@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
"mfio83",
};
static const char * const pistachio_sys_pll_lock_groups[] = {
static const char * const pistachio_audio_pll_lock_groups[] = {
"mfio84",
};
static const char * const pistachio_wifi_pll_lock_groups[] = {
static const char * const pistachio_rpu_v_pll_lock_groups[] = {
"mfio85",
};
static const char * const pistachio_bt_pll_lock_groups[] = {
static const char * const pistachio_rpu_l_pll_lock_groups[] = {
"mfio86",
};
static const char * const pistachio_rpu_v_pll_lock_groups[] = {
static const char * const pistachio_sys_pll_lock_groups[] = {
"mfio87",
};
static const char * const pistachio_rpu_l_pll_lock_groups[] = {
static const char * const pistachio_wifi_pll_lock_groups[] = {
"mfio88",
};
static const char * const pistachio_audio_pll_lock_groups[] = {
static const char * const pistachio_bt_pll_lock_groups[] = {
"mfio89",
};
@ -559,12 +559,12 @@ enum pistachio_mux_option {
PISTACHIO_FUNCTION_DREQ4,
PISTACHIO_FUNCTION_DREQ5,
PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
PISTACHIO_FUNCTION_SYS_PLL_LOCK,
PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
PISTACHIO_FUNCTION_BT_PLL_LOCK,
PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
FUNCTION(dreq4),
FUNCTION(dreq5),
FUNCTION(mips_pll_lock),
FUNCTION(audio_pll_lock),
FUNCTION(rpu_v_pll_lock),
FUNCTION(rpu_l_pll_lock),
FUNCTION(sys_pll_lock),
FUNCTION(wifi_pll_lock),
FUNCTION(bt_pll_lock),
FUNCTION(rpu_v_pll_lock),
FUNCTION(rpu_l_pll_lock),
FUNCTION(audio_pll_lock),
FUNCTION(debug_raw_cca_ind),
FUNCTION(debug_ed_sec20_cca_ind),
FUNCTION(debug_ed_sec40_cca_ind),

View file

@ -545,7 +545,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
return ret;
}
pinctrl_provide_dummies();
/* Enable dummy states for those platforms without pinctrl support */
if (!of_have_populated_dt())
pinctrl_provide_dummies();
ret = sh_pfc_init_ranges(pfc);
if (ret < 0)

View file

@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
.pins = sun8i_a33_pins,
.npins = ARRAY_SIZE(sun8i_a33_pins),
.irq_banks = 2,
.irq_bank_base = 1,
};
static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)

View file

@ -578,7 +578,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
u32 reg = sunxi_irq_cfg_reg(d->hwirq);
u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 index = sunxi_irq_cfg_offset(d->hwirq);
unsigned long flags;
u32 regval;
@ -625,7 +625,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
static void sunxi_pinctrl_irq_ack(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
u32 status_reg = sunxi_irq_status_reg(d->hwirq);
u32 status_reg = sunxi_irq_status_reg(d->hwirq,
pctl->desc->irq_bank_base);
u8 status_idx = sunxi_irq_status_offset(d->hwirq);
/* Clear the IRQ */
@ -635,7 +636,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
static void sunxi_pinctrl_irq_mask(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
unsigned long flags;
u32 val;
@ -652,7 +653,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
unsigned long flags;
u32 val;
@ -744,7 +745,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
if (bank == pctl->desc->irq_banks)
return;
reg = sunxi_irq_status_reg_from_bank(bank);
reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
val = readl(pctl->membase + reg);
if (val) {
@ -1023,9 +1024,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
for (i = 0; i < pctl->desc->irq_banks; i++) {
/* Mask and clear all IRQs before registering a handler */
writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
pctl->desc->irq_bank_base));
writel(0xffffffff,
pctl->membase + sunxi_irq_status_reg_from_bank(i));
pctl->membase + sunxi_irq_status_reg_from_bank(i,
pctl->desc->irq_bank_base));
irq_set_chained_handler_and_data(pctl->irq[i],
sunxi_pinctrl_irq_handler,

View file

@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
int npins;
unsigned pin_base;
unsigned irq_banks;
unsigned irq_bank_base;
bool irq_read_needs_mux;
};
@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
return pin_num * PULL_PINS_BITS;
}
static inline u32 sunxi_irq_cfg_reg(u16 irq)
static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
}
static inline u32 sunxi_irq_cfg_offset(u16 irq)
@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
return irq_num * IRQ_CFG_IRQ_BITS;
}
static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
{
return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
}
static inline u32 sunxi_irq_ctrl_reg(u16 irq)
static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
return sunxi_irq_ctrl_reg_from_bank(bank);
return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
}
static inline u32 sunxi_irq_ctrl_offset(u16 irq)
@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
return irq_num * IRQ_CTRL_IRQ_BITS;
}
static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
{
return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
}
static inline u32 sunxi_irq_status_reg(u16 irq)
static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
return sunxi_irq_status_reg_from_bank(bank);
return sunxi_irq_status_reg_from_bank(bank, bank_base);
}
static inline u32 sunxi_irq_status_offset(u16 irq)

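The sunxi_irq_*_reg() helpers gain a bank_base parameter so parts such as the A33, whose first interrupt-capable bank is bank 1 of the register file, compute their CFG/CTRL/STATUS offsets from the right base. A quick stand-alone version of the arithmetic; the constants are representative, the authoritative values live in pinctrl-sunxi.h:

#include <stdio.h>

#define IRQ_PER_BANK	32
#define IRQ_MEM_SIZE	0x20
#define IRQ_CTRL_REG	0x210

/* Old helper: assumes the first IRQ bank is bank 0 of the register file. */
static unsigned int ctrl_reg_old(unsigned int irq)
{
	return IRQ_CTRL_REG + (irq / IRQ_PER_BANK) * IRQ_MEM_SIZE;
}

/* New helper: shift everything by the SoC's first usable bank. */
static unsigned int ctrl_reg_new(unsigned int irq, unsigned int bank_base)
{
	return IRQ_CTRL_REG + (bank_base + irq / IRQ_PER_BANK) * IRQ_MEM_SIZE;
}

int main(void)
{
	/* A33-style part: irq_bank_base = 1, so hwirq 0 must use bank 1 registers. */
	printf("old: 0x%x  new: 0x%x\n", ctrl_reg_old(0), ctrl_reg_new(0, 1));
	return 0;	/* old: 0x210  new: 0x230 */
}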
View file

@ -1275,18 +1275,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
struct scsi_device *sdp = sdkp->device;
struct Scsi_Host *host = sdp->host;
sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
int diskinfo[4];
/* default to most commonly used values */
diskinfo[0] = 0x40; /* 1 << 6 */
diskinfo[1] = 0x20; /* 1 << 5 */
diskinfo[2] = sdkp->capacity >> 11;
diskinfo[0] = 0x40; /* 1 << 6 */
diskinfo[1] = 0x20; /* 1 << 5 */
diskinfo[2] = capacity >> 11;
/* override with calculated, extended default, or driver values */
if (host->hostt->bios_param)
host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
else
scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
scsicam_bios_param(bdev, capacity, diskinfo);
geo->heads = diskinfo[0];
geo->sectors = diskinfo[1];
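sd_getgeo() keeps its default geometry of 64 heads and 32 sectors per track, so the cylinder count is the capacity in 512-byte sectors shifted right by 11 (64 * 32 = 2048 sectors per cylinder); the change above simply makes sure the capacity really is in 512-byte sectors before that shift. A quick arithmetic check with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long long sectors = 4096ULL * 2048;	/* 4 GiB worth of 512-byte sectors */
	unsigned int heads = 0x40, spt = 0x20;		/* 64 heads, 32 sectors per track */
	unsigned long long cylinders = sectors >> 11;	/* same as / (heads * spt) */

	printf("%llu == %llu\n", cylinders, sectors / (heads * spt));
	return 0;	/* prints 4096 == 4096 */
}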
@ -2337,14 +2338,6 @@ got_data:
if (sdkp->capacity > 0xffffffff)
sdp->use_16_for_rw = 1;
/* Rescale capacity to 512-byte units */
if (sector_size == 4096)
sdkp->capacity <<= 3;
else if (sector_size == 2048)
sdkp->capacity <<= 2;
else if (sector_size == 1024)
sdkp->capacity <<= 1;
blk_queue_physical_block_size(sdp->request_queue,
sdkp->physical_block_size);
sdkp->device->sector_size = sector_size;
@ -2812,11 +2805,6 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp)
return 0;
}
static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
{
return blocks << (ilog2(sdev->sector_size) - 9);
}
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@ -2900,7 +2888,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
/* Combine with controller limits */
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
set_capacity(disk, sdkp->capacity);
set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
kfree(buffer);

View file

@ -65,7 +65,7 @@ struct scsi_disk {
struct device dev;
struct gendisk *disk;
atomic_t openers;
sector_t capacity; /* size in 512-byte sectors */
sector_t capacity; /* size in logical blocks */
u32 max_xfer_blocks;
u32 opt_xfer_blocks;
u32 max_ws_blocks;
@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
return 0;
}
static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
{
return blocks << (ilog2(sdev->sector_size) - 9);
}
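logical_to_sectors() moves into sd.h and now operates on sector_t, shifting a count of logical blocks left by ilog2(sector_size) - 9 to express it in 512-byte sectors. A worked example of that conversion with a local ilog2; sector sizes are powers of two, so a simple shift loop is enough here:

#include <stdio.h>

/* ilog2(x) for power-of-two sector sizes; enough for this example. */
static unsigned int ilog2_u32(unsigned int x)
{
	unsigned int r = 0;
	while (x >>= 1)
		r++;
	return r;
}

/* Capacity in logical blocks -> capacity in 512-byte sectors. */
static unsigned long long logical_to_sectors(unsigned int sector_size,
					     unsigned long long blocks)
{
	return blocks << (ilog2_u32(sector_size) - 9);
}

int main(void)
{
	/* 1000 logical blocks of 4096 bytes = 8000 sectors of 512 bytes. */
	printf("%llu\n", logical_to_sectors(4096, 1000));	/* 8000 */
	printf("%llu\n", logical_to_sectors(512, 1000));	/* 1000 */
	return 0;
}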
/*
* A DIF-capable target device can be formatted with different
* protection schemes. Currently 0 through 3 are defined:

View file

@ -251,8 +251,10 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
* memory coming from the heaps is ready for dma, ie if it has a
* cached mapping that mapping has been invalidated
*/
for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
sg_dma_address(sg) = sg_phys(sg);
sg_dma_len(sg) = sg->length;
}
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);

View file

@ -5392,6 +5392,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
}
bos = udev->bos;
udev->bos = NULL;
for (i = 0; i < SET_CONFIG_TRIES; ++i) {
@ -5484,11 +5485,8 @@ done:
usb_set_usb2_hardware_lpm(udev, 1);
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
/* release the new BOS descriptor allocated by hub_port_init() */
if (udev->bos != bos) {
usb_release_bos_descriptor(udev);
udev->bos = bos;
}
usb_release_bos_descriptor(udev);
udev->bos = bos;
return 0;
re_enumerate:

View file

@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
goto __usbhs_pkt_handler_end;
}
ret = func(pkt, &is_done);
if (likely(func))
ret = func(pkt, &is_done);
if (is_done)
__usbhsf_pkt_del(pkt);
@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
pkt->trans = len;
usbhsf_tx_irq_ctrl(pipe, 0);
INIT_WORK(&pkt->work, xfer_work);
schedule_work(&pkt->work);

View file

@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
unsigned long flags;
ureq->req.actual = pkt->actual;
usbhsg_queue_pop(uep, ureq, 0);
usbhs_lock(priv, flags);
if (uep)
__usbhsg_queue_pop(uep, ureq, 0);
usbhs_unlock(priv, flags);
}
static void usbhsg_queue_push(struct usbhsg_uep *uep,

View file

@ -2,7 +2,7 @@
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
*
* Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
* Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
*
@ -757,6 +757,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
return SUCCESS;
}
static int uas_target_alloc(struct scsi_target *starget)
{
struct uas_dev_info *devinfo = (struct uas_dev_info *)
dev_to_shost(starget->dev.parent)->hostdata;
if (devinfo->flags & US_FL_NO_REPORT_LUNS)
starget->no_report_luns = 1;
return 0;
}
static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
@ -800,7 +811,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
if (devinfo->flags & US_FL_BROKEN_FUA)
sdev->broken_fua = 1;
scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
return 0;
}
@ -808,6 +818,7 @@ static struct scsi_host_template uas_host_template = {
.module = THIS_MODULE,
.name = "uas",
.queuecommand = uas_queuecommand,
.target_alloc = uas_target_alloc,
.slave_alloc = uas_slave_alloc,
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
@ -932,6 +943,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (result)
goto set_alt0;
/*
* 1 tag is reserved for untagged commands +
* 1 tag to avoid off by one errors in some bridge firmwares
*/
shost->can_queue = devinfo->qdepth - 2;
usb_set_intfdata(intf, shost);
result = scsi_add_host(shost, &intf->dev);
if (result)

View file

@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
/* Reported-by: David Webb <djw@noc.ac.uk> */
UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
"Seagate",
"Expansion Desk",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_LUNS),
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
"Seagate",

View file

@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
US_FL_MAX_SECTORS_240);
US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
p = quirks;
while (*p) {
@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
case 'i':
f |= US_FL_IGNORE_DEVICE;
break;
case 'j':
f |= US_FL_NO_REPORT_LUNS;
break;
case 'l':
f |= US_FL_NOT_LOCKABLE;
break;

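usb_stor_adjust_quirks() maps single letters from the quirks= module parameter onto US_FL_* bits, and this change wires the new 'j' letter to US_FL_NO_REPORT_LUNS, matching the kernel-parameters.txt entry earlier in this commit. A tiny stand-alone model of that letter-to-flag parse loop; the bit values below are illustrative, the real masks live in include/linux/usb_usual.h:

#include <stdio.h>

#define FL_IGNORE_DEVICE	(1 << 0)	/* 'i' */
#define FL_NO_REPORT_LUNS	(1 << 1)	/* 'j' */
#define FL_NOT_LOCKABLE		(1 << 2)	/* 'l' */

static unsigned long parse_quirks(const char *s)
{
	unsigned long f = 0;

	for (; *s; s++) {
		switch (*s) {
		case 'i': f |= FL_IGNORE_DEVICE;  break;
		case 'j': f |= FL_NO_REPORT_LUNS; break;
		case 'l': f |= FL_NOT_LOCKABLE;   break;
		default:  break;	/* unknown letters are ignored */
		}
	}
	return f;
}

int main(void)
{
	printf("0x%lx\n", parse_quirks("jl"));	/* prints 0x6 */
	return 0;
}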
View file

@ -17,6 +17,7 @@
*
*/
#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci_common.h"
@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
vp_iowrite8(0, &vp_dev->common->device_status);
/* Flush out the status write, and flush in device writes,
* including MSI-X interrupts, if any. */
vp_ioread8(&vp_dev->common->device_status);
/* After writing 0 to device_status, the driver MUST wait for a read of
* device_status to return 0 before reinitializing the device.
* This will flush out the status write, and flush in device writes,
* including MSI-X interrupts, if any.
*/
while (vp_ioread8(&vp_dev->common->device_status))
msleep(1);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
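vp_reset() now follows the virtio 1.0 requirement that after writing 0 to device_status the driver must wait until the register reads back 0, sleeping between polls rather than issuing a single flushing read. A user-space model of that reset-and-poll loop; the fake read_status() simply reports the reset complete after a few reads, and nanosleep stands in for msleep(1):

#include <stdio.h>
#include <time.h>

/* Pretend MMIO status register: reports "still resetting" for a few reads. */
static unsigned char read_status(void)
{
	static int pending_reads = 3;
	return pending_reads-- > 0 ? 0x0f : 0;
}

static void msleep_1ms(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };
	nanosleep(&ts, NULL);
}

int main(void)
{
	/* the "write 0 to device_status" step would happen here ... */
	while (read_status())		/* ... then poll until it reads back 0 */
		msleep_1ms();
	printf("device reports reset complete\n");
	return 0;
}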

View file

@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
int rc = 0;
irq_move_irq(data);
if (!VALID_EVTCHN(evtchn))
return;
if (VALID_EVTCHN(evtchn))
if (unlikely(irqd_is_setaffinity_pending(data))) {
int masked = test_and_set_mask(evtchn);
clear_evtchn(evtchn);
irq_move_masked_irq(data);
if (!masked)
unmask_evtchn(evtchn);
} else
clear_evtchn(evtchn);
if (pirq_needs_eoi(data->irq)) {
@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
irq_move_irq(data);
if (!VALID_EVTCHN(evtchn))
return;
if (VALID_EVTCHN(evtchn))
if (unlikely(irqd_is_setaffinity_pending(data))) {
int masked = test_and_set_mask(evtchn);
clear_evtchn(evtchn);
irq_move_masked_irq(data);
if (!masked)
unmask_evtchn(evtchn);
} else
clear_evtchn(evtchn);
}
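Both eoi_pirq() and ack_dynirq() now special-case a pending affinity change: mask the event channel, clear it, call irq_move_masked_irq(), and only unmask if it was not already masked, so no event can be delivered on the old CPU while the move is in flight. A compact user-space model of that ordering, with booleans standing in for the event-channel mask and pending bits:

#include <stdbool.h>
#include <stdio.h>

static bool masked, pending;

static bool test_and_set_mask(void) { bool was = masked; masked = true; return was; }
static void clear_pending(void)     { pending = false; }
static void unmask(void)            { masked = false; }
static void move_masked_irq(void)   { /* retarget while no events can fire */ }

/* Ack with a pending affinity change: mask, clear, move, then restore the
 * original mask state.  Without the mask, an event could arrive on the old
 * CPU between the clear and the move. */
static void ack_with_move(void)
{
	bool was_masked = test_and_set_mask();

	clear_pending();
	move_masked_irq();
	if (!was_masked)
		unmask();
}

int main(void)
{
	pending = true;
	ack_with_move();
	printf("masked=%d pending=%d\n", masked, pending);	/* masked=0 pending=0 */
	return 0;
}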

View file

@ -1885,7 +1885,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
*/
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
struct dentry *dentry = file->f_path.dentry;
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;

View file

@ -4406,6 +4406,127 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
return ret;
}
/*
* When we are logging a new inode X, check if it doesn't have a reference that
* matches the reference from some other inode Y created in a past transaction
* and that was renamed in the current transaction. If we don't do this, then at
* log replay time we can lose inode Y (and all its files if it's a directory):
*
* mkdir /mnt/x
* echo "hello world" > /mnt/x/foobar
* sync
* mv /mnt/x /mnt/y
* mkdir /mnt/x # or touch /mnt/x
* xfs_io -c fsync /mnt/x
* <power fail>
* mount fs, trigger log replay
*
* After the log replay procedure, we would lose the first directory and all its
* files (file foobar).
* For the case where inode Y is not a directory we simply end up losing it:
*
* echo "123" > /mnt/foo
* sync
* mv /mnt/foo /mnt/bar
* echo "abc" > /mnt/foo
* xfs_io -c fsync /mnt/foo
* <power fail>
*
* We also need this for cases where a snapshot entry is replaced by some other
* entry (file or directory) otherwise we end up with an unreplayable log due to
* attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
* if it were a regular entry:
*
* mkdir /mnt/x
* btrfs subvolume snapshot /mnt /mnt/x/snap
* btrfs subvolume delete /mnt/x/snap
* rmdir /mnt/x
* mkdir /mnt/x
* fsync /mnt/x or fsync some new file inside it
* <power fail>
*
* The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
* the same transaction.
*/
static int btrfs_check_ref_name_override(struct extent_buffer *eb,
const int slot,
const struct btrfs_key *key,
struct inode *inode)
{
int ret;
struct btrfs_path *search_path;
char *name = NULL;
u32 name_len = 0;
u32 item_size = btrfs_item_size_nr(eb, slot);
u32 cur_offset = 0;
unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
search_path = btrfs_alloc_path();
if (!search_path)
return -ENOMEM;
search_path->search_commit_root = 1;
search_path->skip_locking = 1;
while (cur_offset < item_size) {
u64 parent;
u32 this_name_len;
u32 this_len;
unsigned long name_ptr;
struct btrfs_dir_item *di;
if (key->type == BTRFS_INODE_REF_KEY) {
struct btrfs_inode_ref *iref;
iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
parent = key->offset;
this_name_len = btrfs_inode_ref_name_len(eb, iref);
name_ptr = (unsigned long)(iref + 1);
this_len = sizeof(*iref) + this_name_len;
} else {
struct btrfs_inode_extref *extref;
extref = (struct btrfs_inode_extref *)(ptr +
cur_offset);
parent = btrfs_inode_extref_parent(eb, extref);
this_name_len = btrfs_inode_extref_name_len(eb, extref);
name_ptr = (unsigned long)&extref->name;
this_len = sizeof(*extref) + this_name_len;
}
if (this_name_len > name_len) {
char *new_name;
new_name = krealloc(name, this_name_len, GFP_NOFS);
if (!new_name) {
ret = -ENOMEM;
goto out;
}
name_len = this_name_len;
name = new_name;
}
read_extent_buffer(eb, name, name_ptr, this_name_len);
di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
search_path, parent,
name, this_name_len, 0);
if (di && !IS_ERR(di)) {
ret = 1;
goto out;
} else if (IS_ERR(di)) {
ret = PTR_ERR(di);
goto out;
}
btrfs_release_path(search_path);
cur_offset += this_len;
}
ret = 0;
out:
btrfs_free_path(search_path);
kfree(name);
return ret;
}
/* log a single inode in the tree log.
* At least one parent directory for this inode must exist in the tree
* or be logged already.
@ -4578,6 +4699,22 @@ again:
if (min_key.type == BTRFS_INODE_ITEM_KEY)
need_log_inode_item = false;
if ((min_key.type == BTRFS_INODE_REF_KEY ||
min_key.type == BTRFS_INODE_EXTREF_KEY) &&
BTRFS_I(inode)->generation == trans->transid) {
ret = btrfs_check_ref_name_override(path->nodes[0],
path->slots[0],
&min_key, inode);
if (ret < 0) {
err = ret;
goto out_unlock;
} else if (ret > 0) {
err = 1;
btrfs_set_log_full_commit(root->fs_info, trans);
goto out_unlock;
}
}
/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
if (ins_nr == 0)

View file

@ -1666,7 +1666,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
DCACHE_OP_REVALIDATE |
DCACHE_OP_WEAK_REVALIDATE |
DCACHE_OP_DELETE |
DCACHE_OP_SELECT_INODE));
DCACHE_OP_SELECT_INODE |
DCACHE_OP_REAL));
dentry->d_op = op;
if (!op)
return;
@ -1684,6 +1685,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
dentry->d_flags |= DCACHE_OP_PRUNE;
if (op->d_select_inode)
dentry->d_flags |= DCACHE_OP_SELECT_INODE;
if (op->d_real)
dentry->d_flags |= DCACHE_OP_REAL;
}
EXPORT_SYMBOL(d_set_d_op);

View file

@ -849,6 +849,29 @@ do { \
#include "extents_status.h"
/*
* Lock subclasses for i_data_sem in the ext4_inode_info structure.
*
* These are needed to avoid lockdep false positives when we need to
* allocate blocks to the quota inode during ext4_map_blocks(), while
* holding i_data_sem for a normal (non-quota) inode. Since we don't
* do quota tracking for the quota inode, this avoids deadlock (as
* well as infinite recursion, since it isn't turtles all the way
* down...)
*
* I_DATA_SEM_NORMAL - Used for most inodes
* I_DATA_SEM_OTHER - Used by move_inode.c for the second normal inode
* where the second inode has larger inode number
* than the first
* I_DATA_SEM_QUOTA - Used for quota inodes only
*/
enum {
I_DATA_SEM_NORMAL = 0,
I_DATA_SEM_OTHER,
I_DATA_SEM_QUOTA,
};
/*
* fourth extended file system inode data in memory
*/

View file

@ -60,10 +60,10 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
{
if (first < second) {
down_write(&EXT4_I(first)->i_data_sem);
down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
} else {
down_write(&EXT4_I(second)->i_data_sem);
down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
}
}
@ -483,6 +483,13 @@ mext_check_arguments(struct inode *orig_inode,
return -EBUSY;
}
if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
ext4_debug("ext4 move extent: The argument files should "
"not be quota files [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
return -EBUSY;
}
/* Ext4 move extent supports only extent based file */
if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
ext4_debug("ext4 move extent: orig file is not extents "

View file

@ -1292,9 +1292,9 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
return -1;
}
if (ext4_has_feature_quota(sb)) {
ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
"when QUOTA feature is enabled");
return -1;
ext4_msg(sb, KERN_INFO, "Journaled quota options "
"ignored when QUOTA feature is enabled");
return 1;
}
qname = match_strdup(args);
if (!qname) {
@ -1657,10 +1657,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
return -1;
}
if (ext4_has_feature_quota(sb)) {
ext4_msg(sb, KERN_ERR,
"Cannot set journaled quota options "
ext4_msg(sb, KERN_INFO,
"Quota format mount options ignored "
"when QUOTA feature is enabled");
return -1;
return 1;
}
sbi->s_jquota_fmt = m->mount_opt;
#endif
@ -1721,11 +1721,11 @@ static int parse_options(char *options, struct super_block *sb,
#ifdef CONFIG_QUOTA
if (ext4_has_feature_quota(sb) &&
(test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
"feature is enabled");
return 0;
}
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
"mount options ignored.");
clear_opt(sb, USRQUOTA);
clear_opt(sb, GRPQUOTA);
} else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sb, USRQUOTA);
@ -4936,6 +4936,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
EXT4_SB(sb)->s_jquota_fmt, type);
}
static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
struct ext4_inode_info *ei = EXT4_I(inode);
/* The first argument of lockdep_set_subclass has to be
* *exactly* the same as the argument to init_rwsem() --- in
* this case, in init_once() --- or lockdep gets unhappy
* because the name of the lock is set using the
* stringification of the argument to init_rwsem().
*/
(void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */
lockdep_set_subclass(&ei->i_data_sem, subclass);
}
/*
* Standard function to be called on quota_on
*/
@ -4975,8 +4989,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
if (err)
return err;
}
return dquot_quota_on(sb, type, format_id, path);
lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
err = dquot_quota_on(sb, type, format_id, path);
if (err)
lockdep_set_quota_inode(path->dentry->d_inode,
I_DATA_SEM_NORMAL);
return err;
}
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
@ -5002,8 +5020,11 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
/* Don't account quota for quota files to avoid recursion */
qf_inode->i_flags |= S_NOQUOTA;
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
err = dquot_enable(qf_inode, type, format_id, flags);
iput(qf_inode);
if (err)
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
return err;
}

View file

@ -377,7 +377,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
again:
timestamp = jiffies;
gencount = nfs_inc_attr_generation_counter();
error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages,
error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
NFS_SERVER(inode)->dtsize, desc->plus);
if (error < 0) {
/* We requested READDIRPLUS, but the server doesn't grok it */
@ -560,7 +560,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
count++;
if (desc->plus != 0)
nfs_prime_dcache(desc->file->f_path.dentry, entry);
nfs_prime_dcache(file_dentry(desc->file), entry);
status = nfs_readdir_add_to_array(entry, page);
if (status != 0)
@ -864,7 +864,7 @@ static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
*/
static int nfs_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
nfs_readdir_descriptor_t my_desc,
*desc = &my_desc;

View file

@ -927,7 +927,7 @@ int nfs_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
nfs_file_set_open_context(filp, ctx);

View file

@ -26,7 +26,7 @@ static int
nfs4_file_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
struct dentry *dentry = filp->f_path.dentry;
struct dentry *dentry = file_dentry(filp);
struct dentry *parent = NULL;
struct inode *dir;
unsigned openflags = filp->f_flags;
@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
parent = dget_parent(dentry);
dir = d_inode(parent);
ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
err = PTR_ERR(ctx);
if (IS_ERR(ctx))
goto out;

Some files were not shown because too many files have changed in this diff.