Merge android-4.4.115 (aa856bd) into msm-4.4

* refs/heads/tmp-aa856bd
  Linux 4.4.115
  spi: imx: do not access registers while clocks disabled
  serial: imx: Only wakeup via RTSDEN bit if the system has RTS/CTS
  selinux: general protection fault in sock_has_perm
  usb: uas: unconditionally bring back host after reset
  usb: f_fs: Prevent gadget unbind if it is already unbound
  USB: serial: simple: add Motorola Tetra driver
  usbip: list: don't list devices attached to vhci_hcd
  usbip: prevent bind loops on devices attached to vhci_hcd
  USB: serial: io_edgeport: fix possible sleep-in-atomic
  CDC-ACM: apply quirk for card reader
  USB: cdc-acm: Do not log urb submission errors on disconnect
  USB: serial: pl2303: new device id for Chilitag
  usb: option: Add support for FS040U modem
  staging: rtl8188eu: Fix incorrect response to SIOCGIWESSID
  usb: gadget: don't dereference g until after it has been null checked
  media: usbtv: add a new usbid
  scsi: ufs: ufshcd: fix potential NULL pointer dereference in ufshcd_config_vreg
  scsi: aacraid: Prevent crash in case of free interrupt during scsi EH path
  xfs: ubsan fixes
  drm/omap: Fix error handling path in 'omap_dmm_probe()'
  kmemleak: add scheduling point to kmemleak_scan()
  SUNRPC: Allow connect to return EHOSTUNREACH
  quota: Check for register_shrinker() failure.
  net: ethernet: xilinx: Mark XILINX_LL_TEMAC broken on 64-bit
  hwmon: (pmbus) Use 64bit math for DIRECT format values
  lockd: fix "list_add double add" caused by legacy signal interface
  nfsd: check for use of the closed special stateid
  grace: replace BUG_ON by WARN_ONCE in exit_net hook
  nfsd: Ensure we check stateid validity in the seqid operation checks
  nfsd: CLOSE SHOULD return the invalid special stateid for NFSv4.x (x>0)
  xen-netfront: remove warning when unloading module
  KVM: VMX: Fix rflags cache during vCPU reset
  btrfs: fix deadlock when writing out space cache
  mac80211: fix the update of path metric for RANN frame
  openvswitch: fix the incorrect flow action alloc size
  drm/amdkfd: Fix SDMA oversubsription handling
  drm/amdkfd: Fix SDMA ring buffer size calculation
  drm/amdgpu: Fix SDMA load/unload sequence on HWS disabled mode
  bcache: check return value of register_shrinker
  cpufreq: Add Loongson machine dependencies
  ACPI / bus: Leave modalias empty for devices which are not present
  KVM: x86: ioapic: Preserve read-only values in the redirection table
  KVM: x86: ioapic: Clear Remote IRR when entry is switched to edge-triggered
  KVM: x86: ioapic: Fix level-triggered EOI and IOAPIC reconfigure race
  KVM: X86: Fix operand/address-size during instruction decoding
  KVM: x86: Don't re-execute instruction when not passing CR2 value
  KVM: x86: emulator: Return to user-mode on L1 CPL=0 emulation failure
  igb: Free IRQs when device is hotplugged
  mtd: nand: denali_pci: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE
  gpio: ath79: add missing MODULE_DESCRIPTION/LICENSE
  gpio: iop: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE
  power: reset: zx-reboot: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE
  crypto: af_alg - whitelist mask and type
  crypto: aesni - handle zero length dst buffer
  ALSA: seq: Make ioctls race-free
  kaiser: fix intel_bts perf crashes
  x86/pti: Make unpoison of pgd for trusted boot work for real
  bpf: reject stores into ctx via st and xadd
  bpf: fix 32-bit divide by zero
  bpf: fix divides by zero
  bpf: avoid false sharing of map refcount with max_entries
  bpf: arsh is not supported in 32 bit alu thus reject it
  bpf: introduce BPF_JIT_ALWAYS_ON config
  bpf: fix bpf_tail_call() x64 JIT
  x86: bpf_jit: small optimization in emit_bpf_tail_call()
  bpf: fix branch pruning logic
  loop: fix concurrent lo_open/lo_release
  ANDROID: sdcardfs: Protect set_top
  ANDROID: fsnotify: Notify lower fs of open
  Revert "ANDROID: sdcardfs: notify lower file of opens"
  ANDROID: sdcardfs: Use lower getattr times/size
  ANDROID: sched/rt: schedtune: Add boost retention to RT

Conflicts:
	arch/x86/Kconfig
	kernel/sched/rt.c

Change-Id: I91b08e1b8e0a1c6ca9c245597acad0bf197f9527
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
commit 2fd547e8d1 by Srinivasarao P, 2018-02-05 11:56:41 +05:30
79 changed files with 711 additions and 183 deletions

@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 114
SUBLEVEL = 115
EXTRAVERSION =
NAME = Blurry Fish Butt

@@ -61,6 +61,7 @@ config ARM64
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_BPF_JIT
select HAVE_EBPF_JIT
select HAVE_C_RECORDMCOUNT
select HAVE_CC_STACKPROTECTOR
select HAVE_CMPXCHG_DOUBLE

@@ -124,6 +124,7 @@ config S390
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_KMEMLEAK

@@ -93,6 +93,7 @@ config X86
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_EBPF_JIT if X86_64
select HAVE_CC_STACKPROTECTOR
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL

@@ -965,7 +965,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
if (sg_is_last(req->src) &&
req->src->offset + req->src->length <= PAGE_SIZE &&
sg_is_last(req->dst) &&
sg_is_last(req->dst) && req->dst->length &&
req->dst->offset + req->dst->length <= PAGE_SIZE) {
one_entry_in_sg = 1;
scatterwalk_start(&src_sg_walk, req->src);

@@ -998,7 +998,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
static inline int emulate_instruction(struct kvm_vcpu *vcpu,
int emulation_type)
{
return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
return x86_emulate_instruction(vcpu, 0,
emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
}
void kvm_enable_efer_bits(u64);

@@ -22,6 +22,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/coredump.h>
#include <linux/kaiser.h>
#include <asm-generic/sizes.h>
#include <asm/perf_event.h>
@@ -67,6 +68,23 @@ static size_t buf_size(struct page *page)
return 1 << (PAGE_SHIFT + page_private(page));
}
static void bts_buffer_free_aux(void *data)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
struct bts_buffer *buf = data;
int nbuf;
for (nbuf = 0; nbuf < buf->nr_bufs; nbuf++) {
struct page *page = buf->buf[nbuf].page;
void *kaddr = page_address(page);
size_t page_size = buf_size(page);
kaiser_remove_mapping((unsigned long)kaddr, page_size);
}
#endif
kfree(data);
}
static void *
bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
{
@@ -103,29 +121,33 @@ bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
buf->real_size = size - size % BTS_RECORD_SIZE;
for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
unsigned int __nr_pages;
void *kaddr = pages[pg];
size_t page_size;
page = virt_to_page(kaddr);
page_size = buf_size(page);
if (kaiser_add_mapping((unsigned long)kaddr,
page_size, __PAGE_KERNEL) < 0) {
buf->nr_bufs = nbuf;
bts_buffer_free_aux(buf);
return NULL;
}
page = virt_to_page(pages[pg]);
__nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
buf->buf[nbuf].page = page;
buf->buf[nbuf].offset = offset;
buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement;
buf->buf[nbuf].size = page_size - buf->buf[nbuf].displacement;
pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
buf->buf[nbuf].size -= pad;
pg += __nr_pages;
offset += __nr_pages << PAGE_SHIFT;
pg += page_size >> PAGE_SHIFT;
offset += page_size;
}
return buf;
}
static void bts_buffer_free_aux(void *data)
{
kfree(data);
}
static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx)
{
return buf->buf[idx].offset + buf->buf[idx].displacement;

@@ -140,6 +140,16 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
return -1;
set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
pte_unmap(pte);
/*
* PTI poisons low addresses in the kernel page tables in the
* name of making them unusable for userspace. To execute
* code at such a low address, the poison must be cleared.
*
* Note: 'pgd' actually gets set in pud_alloc().
*/
pgd->pgd &= ~_PAGE_NX;
return 0;
}

@@ -4978,6 +4978,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
u16 dummy;
struct desc_struct desc;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
@@ -4996,6 +4998,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
def_op_bytes = def_ad_bytes = 2;
ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
if (desc.d)
def_op_bytes = def_ad_bytes = 4;
break;
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;

@@ -247,8 +247,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
index == RTC_GSI) {
if (kvm_apic_match_dest(vcpu, NULL, 0,
e->fields.dest_id, e->fields.dest_mode) ||
(e->fields.trig_mode == IOAPIC_EDGE_TRIG &&
kvm_apic_pending_eoi(vcpu, e->fields.vector)))
kvm_apic_pending_eoi(vcpu, e->fields.vector))
__set_bit(e->fields.vector,
(unsigned long *)eoi_exit_bitmap);
}
@@ -269,6 +268,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
unsigned index;
bool mask_before, mask_after;
int old_remote_irr, old_delivery_status;
union kvm_ioapic_redirect_entry *e;
switch (ioapic->ioregsel) {
@@ -291,14 +291,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
return;
e = &ioapic->redirtbl[index];
mask_before = e->fields.mask;
/* Preserve read-only fields */
old_remote_irr = e->fields.remote_irr;
old_delivery_status = e->fields.delivery_status;
if (ioapic->ioregsel & 1) {
e->bits &= 0xffffffff;
e->bits |= (u64) val << 32;
} else {
e->bits &= ~0xffffffffULL;
e->bits |= (u32) val;
e->fields.remote_irr = 0;
}
e->fields.remote_irr = old_remote_irr;
e->fields.delivery_status = old_delivery_status;
/*
* Some OSes (Linux, Xen) assume that Remote IRR bit will
* be cleared by IOAPIC hardware when the entry is configured
* as edge-triggered. This behavior is used to simulate an
* explicit EOI on IOAPICs that don't have the EOI register.
*/
if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
e->fields.remote_irr = 0;
mask_after = e->fields.mask;
if (mask_before != mask_after)
kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);

@@ -4954,7 +4954,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
}
vmcs_writel(GUEST_RFLAGS, 0x02);
kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
kvm_rip_write(vcpu, 0xfff0);
vmcs_writel(GUEST_GDTR_BASE, 0);
@@ -6023,7 +6023,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
return 1;
err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
err = emulate_instruction(vcpu, 0);
if (err == EMULATE_USER_EXIT) {
++vcpu->stat.mmio_exits;

@@ -5153,7 +5153,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
r = EMULATE_FAIL;
r = EMULATE_USER_EXIT;
}
kvm_queue_exception(vcpu, UD_VECTOR);

@@ -266,10 +266,10 @@ static void emit_bpf_tail_call(u8 **pprog)
/* if (index >= array->map.max_entries)
* goto out;
*/
EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
EMIT2(0x89, 0xD2); /* mov edx, edx */
EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
offsetof(struct bpf_array, map.max_entries));
EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
#define OFFSET1 47 /* number of bytes to jump */
#define OFFSET1 43 /* number of bytes to jump */
EMIT2(X86_JBE, OFFSET1); /* jbe out */
label1 = cnt;
@@ -278,21 +278,20 @@ static void emit_bpf_tail_call(u8 **pprog)
*/
EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 36
#define OFFSET2 32
EMIT2(X86_JA, OFFSET2); /* ja out */
label2 = cnt;
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
/* prog = array->ptrs[index]; */
EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
offsetof(struct bpf_array, ptrs));
EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
/* if (prog == NULL)
* goto out;
*/
EMIT4(0x48, 0x83, 0xF8, 0x00); /* cmp rax, 0 */
EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
#define OFFSET3 10
EMIT2(X86_JE, OFFSET3); /* je out */
label3 = cnt;

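The functional fix here is the 32-bit truncation: the tail-call index is a u32, but the old JIT compared the full 64-bit register against map.max_entries (the interpreter side gets the matching "u32 index = BPF_R3" change later in this merge). The mov edx,edx zero-extends the low half; the lea-to-mov switch is just the advertised size optimization. A hedged userspace illustration of the old mismatch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t r3 = 0x100000001ULL;   /* garbage in the upper 32 bits */
        uint32_t max_entries = 16;

        /* old JIT: 64-bit compare, tail call wrongly skipped */
        printf("64-bit compare in bounds: %d\n", r3 < max_entries);

        /* interpreter and fixed JIT: index truncated to u32 first */
        uint32_t index = (uint32_t)r3;
        printf("u32 compare in bounds:    %d (index %u)\n",
               index < max_entries, index);
        return 0;
    }
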
@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent);
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
const u32 forbidden = CRYPTO_ALG_INTERNAL;
const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sockaddr_alg *sa = (void *)uaddr;
@@ -157,6 +157,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
void *private;
int err;
/* If caller uses non-allowed flag, return error. */
if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
return -EINVAL;
if (sock->state == SS_CONNECTED)
return -EINVAL;
@@ -175,9 +179,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (IS_ERR(type))
return PTR_ERR(type);
private = type->bind(sa->salg_name,
sa->salg_feat & ~forbidden,
sa->salg_mask & ~forbidden);
private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
if (IS_ERR(private)) {
module_put(type->owner);
return PTR_ERR(private);

@@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
int count;
struct acpi_hardware_id *id;
/* Avoid unnecessarily loading modules for non present devices. */
if (!acpi_device_is_present(acpi_dev))
return 0;
/*
* Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
* be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the

@@ -1569,9 +1569,8 @@ out:
return err;
}
static void lo_release(struct gendisk *disk, fmode_t mode)
static void __lo_release(struct loop_device *lo)
{
struct loop_device *lo = disk->private_data;
int err;
if (atomic_dec_return(&lo->lo_refcnt))
@@ -1597,6 +1596,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
mutex_unlock(&lo->lo_ctl_mutex);
}
static void lo_release(struct gendisk *disk, fmode_t mode)
{
mutex_lock(&loop_index_mutex);
__lo_release(disk->private_data);
mutex_unlock(&loop_index_mutex);
}
static const struct block_device_operations lo_fops = {
.owner = THIS_MODULE,
.open = lo_open,

@@ -305,6 +305,7 @@ endif
if MIPS
config LOONGSON2_CPUFREQ
tristate "Loongson2 CPUFreq Driver"
depends on LEMOTE_MACH2F
help
This option adds a CPUFreq driver for loongson processors which
support software configurable cpu frequency.
@@ -317,6 +318,7 @@ config LOONGSON2_CPUFREQ
config LOONGSON1_CPUFREQ
tristate "Loongson1 CPUFreq Driver"
depends on LOONGSON1_LS1B
help
This option adds a CPUFreq driver for loongson1 processors which
support software configurable cpu frequency.

@@ -203,3 +203,6 @@ static struct platform_driver ath79_gpio_driver = {
};
module_platform_driver(ath79_gpio_driver);
MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support");
MODULE_LICENSE("GPL v2");

@@ -129,3 +129,7 @@ static int __init iop3xx_gpio_init(void)
return platform_driver_register(&iop3xx_gpio_driver);
}
arch_initcall(iop3xx_gpio_init);
MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_LICENSE("GPL");

@@ -367,29 +367,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
unsigned long end_jiffies;
uint32_t sdma_base_addr;
uint32_t data;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdma_rlc_virtual_addr);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
m->sdma_rlc_rb_base);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
m->sdma_rlc_rb_base_hi);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdma_rlc_rb_rptr_addr_lo);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
if (time_after(jiffies, end_jiffies))
return -ETIME;
usleep_range(500, 1000);
}
if (m->sdma_engine_id) {
data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
RESUME_CTX, 0);
WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
} else {
data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
RESUME_CTX, 0);
WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
}
WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
m->sdma_rlc_doorbell);
m->sdma_rlc_doorbell);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdma_rlc_virtual_addr);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
m->sdma_rlc_rb_base_hi);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdma_rlc_rb_rptr_addr_lo);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
m->sdma_rlc_rb_cntl);
@@ -492,9 +513,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
}
WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
return 0;
}

@@ -215,8 +215,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
BUG_ON(!mm || !mqd || !q);
m = get_sdma_mqd(mqd);
m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;

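The RB_SIZE field holds log2 of the ring size in dwords, and ffs() is 1-based, so the old expression encoded one power of two too much, i.e. a ring twice its actual size. A quick demonstration of the off-by-one:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
        unsigned int queue_size = 4096;     /* bytes, illustrative */
        unsigned int dwords = queue_size / sizeof(unsigned int);

        /* dwords = 1024 = 2^10, so the field must hold 10 */
        printf("ffs(dwords)     = %d (encodes 2^%d, twice too big)\n",
               ffs(dwords), ffs(dwords));
        printf("ffs(dwords) - 1 = %d (correct log2)\n", ffs(dwords) - 1);
        return 0;
    }
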
@@ -205,6 +205,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
if (dev->dqm->queue_count >=
CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
pr_err("Over-subscription is not allowed for SDMA.\n");
retval = -EPERM;
goto err_create_queue;
}
retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
&q->properties.vmid);
pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
case KFD_QUEUE_TYPE_COMPUTE:
/* check if there is over subscription */
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&

@@ -611,7 +611,8 @@ static int omap_dmm_probe(struct platform_device *dev)
match = of_match_node(dmm_of_match, dev->dev.of_node);
if (!match) {
dev_err(&dev->dev, "failed to find matching device node\n");
return -ENODEV;
ret = -ENODEV;
goto fail;
}
omap_dmm->plat_data = match->data;

@@ -20,6 +20,7 @@
*/
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -476,8 +477,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
static long pmbus_reg2data_direct(struct pmbus_data *data,
struct pmbus_sensor *sensor)
{
long val = (s16) sensor->data;
long m, b, R;
s64 b, val = (s16)sensor->data;
s32 m, R;
m = data->info->m[sensor->class];
b = data->info->b[sensor->class];
@@ -505,11 +506,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
R--;
}
while (R < 0) {
val = DIV_ROUND_CLOSEST(val, 10);
val = div_s64(val + 5LL, 10L); /* round closest */
R++;
}
return (val - b) / m;
val = div_s64(val - b, m);
return clamp_val(val, LONG_MIN, LONG_MAX);
}
/*
@@ -629,7 +631,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
static u16 pmbus_data2reg_direct(struct pmbus_data *data,
struct pmbus_sensor *sensor, long val)
{
long m, b, R;
s64 b, val64 = val;
s32 m, R;
m = data->info->m[sensor->class];
b = data->info->b[sensor->class];
@@ -646,18 +649,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
R -= 3; /* Adjust R and b for data in milli-units */
b *= 1000;
}
val = val * m + b;
val64 = val64 * m + b;
while (R > 0) {
val *= 10;
val64 *= 10;
R--;
}
while (R < 0) {
val = DIV_ROUND_CLOSEST(val, 10);
val64 = div_s64(val64 + 5LL, 10L); /* round closest */
R++;
}
return val;
return (u16)clamp_val(val64, S16_MIN, S16_MAX);
}
static u16 pmbus_data2reg_vid(struct pmbus_data *data,

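On 32-bit kernels long is 32 bits, so the old val * m + b followed by the R > 0 scaling loop could overflow well inside the DIRECT format's legal coefficient range, which is why the intermediates move to s64 (with div_s64() and a final clamp_val()). A standalone sketch of the overflow, with illustrative coefficients rather than values from a real chip:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int16_t raw = 30000;            /* register reading */
        int32_t m = 16000, R = 2;
        int64_t b = 0;

        int64_t val = (int64_t)raw * m + b;     /* what the patch computes */
        for (int r = R; r > 0; r--)
            val *= 10;                          /* 48,000,000,000 */

        printf("s64 intermediate:  %lld\n", (long long)val);
        /* what a 32-bit long would have kept of it (wraps) */
        printf("32-bit long keeps: %d\n", (int32_t)val);
        return 0;
    }
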
@@ -808,7 +808,10 @@ int bch_btree_cache_alloc(struct cache_set *c)
c->shrink.scan_objects = bch_mca_scan;
c->shrink.seeks = 4;
c->shrink.batch = c->btree_pages * 2;
register_shrinker(&c->shrink);
if (register_shrinker(&c->shrink))
pr_warn("bcache: %s: could not register shrinker",
__func__);
return 0;
}

@@ -127,6 +127,7 @@ static void usbtv_disconnect(struct usb_interface *intf)
static struct usb_device_id usbtv_id_table[] = {
{ USB_DEVICE(0x1b71, 0x3002) },
{ USB_DEVICE(0x1f71, 0x3301) },
{}
};
MODULE_DEVICE_TABLE(usb, usbtv_id_table);

@@ -119,3 +119,7 @@ static struct pci_driver denali_pci_driver = {
};
module_pci_driver(denali_pci_driver);
MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
MODULE_AUTHOR("Intel Corporation and its suppliers");
MODULE_LICENSE("GPL v2");

@@ -3174,7 +3174,7 @@ static int __igb_close(struct net_device *netdev, bool suspending)
static int igb_close(struct net_device *netdev)
{
if (netif_device_present(netdev))
if (netif_device_present(netdev) || netdev->dismantle)
return __igb_close(netdev, false);
return 0;
}

@@ -34,6 +34,7 @@ config XILINX_AXI_EMAC
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
depends on (PPC || MICROBLAZE)
depends on !64BIT || BROKEN
select PHYLIB
---help---
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC

@@ -86,6 +86,8 @@ struct netfront_cb {
/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
struct netfront_stats {
u64 packets;
u64 bytes;
@@ -2037,10 +2039,12 @@ static void netback_changed(struct xenbus_device *dev,
break;
case XenbusStateClosed:
wake_up_all(&module_unload_q);
if (dev->state == XenbusStateClosed)
break;
/* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
wake_up_all(&module_unload_q);
xenbus_frontend_closed(dev);
break;
}
@@ -2146,6 +2150,20 @@ static int xennet_remove(struct xenbus_device *dev)
dev_dbg(&dev->dev, "%s\n", dev->nodename);
if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
xenbus_switch_state(dev, XenbusStateClosing);
wait_event(module_unload_q,
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosing);
xenbus_switch_state(dev, XenbusStateClosed);
wait_event(module_unload_q,
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosed ||
xenbus_read_driver_state(dev->otherend) ==
XenbusStateUnknown);
}
xennet_disconnect_backend(info);
unregister_netdev(info->netdev);

@@ -78,3 +78,7 @@ static struct platform_driver zx_reboot_driver = {
},
};
module_platform_driver(zx_reboot_driver);
MODULE_DESCRIPTION("ZTE SoCs reset driver");
MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
MODULE_LICENSE("GPL v2");

@@ -1363,13 +1363,13 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
* will ensure that i/o is quiesced and the card is flushed in that
* case.
*/
aac_free_irq(aac);
aac_fib_map_free(aac);
pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
aac->comm_addr = NULL;
aac->comm_phys = 0;
kfree(aac->queues);
aac->queues = NULL;
aac_free_irq(aac);
kfree(aac->fsa_dev);
aac->fsa_dev = NULL;
quirks = aac_get_driver_ident(index)->quirks;

@@ -7986,12 +7986,15 @@ static int ufshcd_config_vreg(struct device *dev,
struct ufs_vreg *vreg, bool on)
{
int ret = 0;
struct regulator *reg = vreg->reg;
const char *name = vreg->name;
struct regulator *reg;
const char *name;
int min_uV, uA_load;
BUG_ON(!vreg);
reg = vreg->reg;
name = vreg->name;
if (regulator_count_voltages(reg) > 0) {
min_uV = on ? vreg->min_uV : 0;
ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);

@@ -1228,12 +1228,23 @@ static int spi_imx_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
int ret;
spi_bitbang_stop(&spi_imx->bitbang);
ret = clk_enable(spi_imx->clk_per);
if (ret)
return ret;
ret = clk_enable(spi_imx->clk_ipg);
if (ret) {
clk_disable(spi_imx->clk_per);
return ret;
}
writel(0, spi_imx->base + MXC_CSPICTRL);
clk_unprepare(spi_imx->clk_ipg);
clk_unprepare(spi_imx->clk_per);
clk_disable_unprepare(spi_imx->clk_ipg);
clk_disable_unprepare(spi_imx->clk_per);
spi_imx_sdma_exit(spi_imx);
spi_master_put(master);

@@ -1399,19 +1399,13 @@ static int rtw_wx_get_essid(struct net_device *dev,
if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
len = pcur_bss->Ssid.SsidLength;
wrqu->essid.length = len;
memcpy(extra, pcur_bss->Ssid.Ssid, len);
wrqu->essid.flags = 1;
} else {
ret = -1;
goto exit;
len = 0;
*extra = 0;
}
exit:
wrqu->essid.length = len;
wrqu->essid.flags = 1;
return ret;
}

@@ -2057,12 +2057,14 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
val &= ~UCR3_AWAKEN;
writel(val, sport->port.membase + UCR3);
val = readl(sport->port.membase + UCR1);
if (on)
val |= UCR1_RTSDEN;
else
val &= ~UCR1_RTSDEN;
writel(val, sport->port.membase + UCR1);
if (sport->have_rtscts) {
val = readl(sport->port.membase + UCR1);
if (on)
val |= UCR1_RTSDEN;
else
val &= ~UCR1_RTSDEN;
writel(val, sport->port.membase + UCR1);
}
}
static int imx_serial_port_suspend_noirq(struct device *dev)

@@ -377,7 +377,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
res = usb_submit_urb(acm->read_urbs[index], mem_flags);
if (res) {
if (res != -EPERM) {
if (res != -EPERM && res != -ENODEV) {
dev_err(&acm->data->dev,
"%s - usb_submit_urb failed: %d\n",
__func__, res);
@@ -1695,6 +1695,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */
.driver_info = SINGLE_RX_URB, /* firmware bug */
},
{ USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
.driver_info = SINGLE_RX_URB,
},
{ USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},

@@ -121,7 +121,6 @@ int config_ep_by_speed(struct usb_gadget *g,
struct usb_function *f,
struct usb_ep *_ep)
{
struct usb_composite_dev *cdev = get_gadget_data(g);
struct usb_endpoint_descriptor *chosen_desc = NULL;
struct usb_descriptor_header **speed_desc = NULL;
@@ -193,8 +192,12 @@ ep_found:
_ep->maxburst = comp_desc->bMaxBurst + 1;
break;
default:
if (comp_desc->bMaxBurst != 0)
if (comp_desc->bMaxBurst != 0) {
struct usb_composite_dev *cdev;
cdev = get_gadget_data(g);
ERROR(cdev, "ep0 bMaxBurst must be 0\n");
}
_ep->maxburst = 1;
break;
}

@@ -63,6 +63,7 @@ config USB_SERIAL_SIMPLE
- Google USB serial devices
- HP4x calculators
- a number of Motorola phones
- Motorola Tetra devices
- Novatel Wireless GPS receivers
- Siemens USB/MPI adapter.
- ViVOtech ViVOpay USB device.

@@ -2219,7 +2219,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port,
/* something went wrong */
dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n",
__func__, status);
usb_kill_urb(urb);
usb_free_urb(urb);
atomic_dec(&CmdUrbs);
return status;

@@ -383,6 +383,9 @@ static void option_instat_callback(struct urb *urb);
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
#define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01
/* Fujisoft products */
#define FUJISOFT_PRODUCT_FS040U 0x9b02
/* iBall 3.5G connect wireless modem */
#define IBALL_3_5G_CONNECT 0x9605
@@ -1897,6 +1900,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
.driver_info = (kernel_ulong_t)&four_g_w100_blacklist
},
{USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist},
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },

@@ -39,6 +39,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },

@@ -17,6 +17,7 @@
#define PL2303_PRODUCT_ID_DCU11 0x1234
#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
#define PL2303_PRODUCT_ID_RSAQ3 0xaaa2
#define PL2303_PRODUCT_ID_CHILITAG 0xaaa8
#define PL2303_PRODUCT_ID_ALDIGA 0x0611
#define PL2303_PRODUCT_ID_MMX 0x0612
#define PL2303_PRODUCT_ID_GPRS 0x0609

@@ -80,6 +80,11 @@ DEVICE(vivopay, VIVOPAY_IDS);
{ USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */
DEVICE(moto_modem, MOTO_IDS);
/* Motorola Tetra driver */
#define MOTOROLA_TETRA_IDS() \
{ USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
/* Novatel Wireless GPS driver */
#define NOVATEL_IDS() \
{ USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
@@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
&google_device,
&vivopay_device,
&moto_modem_device,
&motorola_tetra_device,
&novatel_gps_device,
&hp4x_device,
&suunto_device,
@@ -125,6 +131,7 @@ static const struct usb_device_id id_table[] = {
GOOGLE_IDS(),
VIVOPAY_IDS(),
MOTO_IDS(),
MOTOROLA_TETRA_IDS(),
NOVATEL_IDS(),
HP4X_IDS(),
SUUNTO_IDS(),

@@ -1052,20 +1052,19 @@ static int uas_post_reset(struct usb_interface *intf)
return 0;
err = uas_configure_endpoints(devinfo);
if (err) {
if (err && err != ENODEV)
shost_printk(KERN_ERR, shost,
"%s: alloc streams error %d after reset",
__func__, err);
return 1;
}
/* we must unblock the host in every case lest we deadlock */
spin_lock_irqsave(shost->host_lock, flags);
scsi_report_bus_reset(shost, 0);
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_unblock_requests(shost);
return 0;
return err ? 1 : 0;
}
static int uas_suspend(struct usb_interface *intf, pm_message_t message)

@@ -1258,7 +1258,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
/* Lock all pages first so we can lock the extent safely. */
ret = io_ctl_prepare_pages(io_ctl, inode, 0);
if (ret)
goto out;
goto out_unlock;
lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
0, &cached_state);
@@ -1351,6 +1351,7 @@ out_nospc_locked:
out_nospc:
cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);
out_unlock:
if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
up_write(&block_group->data_rwsem);

@@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm)
struct list_head *grace_list = net_generic(net, grace_net_id);
spin_lock(&grace_lock);
list_add(&lm->list, grace_list);
if (list_empty(&lm->list))
list_add(&lm->list, grace_list);
else
WARN(1, "double list_add attempt detected in net %x %s\n",
net->ns.inum, (net == &init_net) ? "(init_net)" : "");
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_start_grace);
@@ -104,7 +108,9 @@ grace_exit_net(struct net *net)
{
struct list_head *grace_list = net_generic(net, grace_net_id);
BUG_ON(!list_empty(grace_list));
WARN_ONCE(!list_empty(grace_list),
"net %x %s: grace_list is not empty\n",
net->ns.inum, __func__);
}
static struct pernet_operations grace_net_ops = {

@@ -63,12 +63,16 @@ static const stateid_t zero_stateid = {
static const stateid_t currentstateid = {
.si_generation = 1,
};
static const stateid_t close_stateid = {
.si_generation = 0xffffffffU,
};
static u64 current_sessionid = 1;
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
@@ -4701,7 +4705,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
struct nfs4_stid *s;
__be32 status = nfserr_bad_stateid;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return status;
/* Client debugging aid. */
if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
@@ -4759,7 +4764,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
else if (typemask & NFS4_DELEG_STID)
typemask |= NFS4_REVOKED_DELEG_STID;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return nfserr_bad_stateid;
status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
if (status == nfserr_stale_clientid) {
@@ -5011,15 +5017,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
status = nfsd4_check_seqid(cstate, sop, seqid);
if (status)
return status;
if (stp->st_stid.sc_type == NFS4_CLOSED_STID
|| stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
/*
* "Closed" stateid's exist *only* to return
* nfserr_replay_me from the previous step, and
* revoked delegations are kept only for free_stateid.
*/
return nfserr_bad_stateid;
mutex_lock(&stp->st_mutex);
status = nfsd4_lock_ol_stateid(stp);
if (status != nfs_ok)
return status;
status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
if (status == nfs_ok)
status = nfs4_check_fh(current_fh, &stp->st_stid);
@@ -5243,6 +5243,11 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfsd4_close_open_stateid(stp);
mutex_unlock(&stp->st_mutex);
/* See RFC5661 section 18.2.4 */
if (stp->st_stid.sc_client->cl_minorversion)
memcpy(&close->cl_stateid, &close_stateid,
sizeof(close->cl_stateid));
/* put reference from nfs4_preprocess_seqid_op */
nfs4_put_stid(&stp->st_stid);
out:
@@ -6787,6 +6792,10 @@ static int nfs4_state_create_net(struct net *net)
INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
nn->conf_name_tree = RB_ROOT;
nn->unconf_name_tree = RB_ROOT;
nn->boot_time = get_seconds();
nn->grace_ended = false;
nn->nfsd4_manager.block_opens = true;
INIT_LIST_HEAD(&nn->nfsd4_manager.list);
INIT_LIST_HEAD(&nn->client_lru);
INIT_LIST_HEAD(&nn->close_lru);
INIT_LIST_HEAD(&nn->del_recall_lru);
@@ -6841,9 +6850,6 @@ nfs4_state_start_net(struct net *net)
ret = nfs4_state_create_net(net);
if (ret)
return ret;
nn->boot_time = get_seconds();
nn->grace_ended = false;
nn->nfsd4_manager.block_opens = true;
locks_start_grace(net, &nn->nfsd4_manager);
nfsd4_client_tracking_init(net);
printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",

@@ -2919,7 +2919,8 @@ static int __init dquot_init(void)
pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
register_shrinker(&dqcache_shrinker);
if (register_shrinker(&dqcache_shrinker))
panic("Cannot register dquot shrinker");
return 0;
}

@@ -32,23 +32,20 @@ static void inherit_derived_state(struct inode *parent, struct inode *child)
ci->data->under_android = pi->data->under_android;
ci->data->under_cache = pi->data->under_cache;
ci->data->under_obb = pi->data->under_obb;
set_top(ci, pi->top_data);
}
/* helper function for derived state */
void setup_derived_state(struct inode *inode, perm_t perm, userid_t userid,
uid_t uid, bool under_android,
struct sdcardfs_inode_data *top)
uid_t uid)
{
struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
info->data->perm = perm;
info->data->userid = userid;
info->data->d_uid = uid;
info->data->under_android = under_android;
info->data->under_android = false;
info->data->under_cache = false;
info->data->under_obb = false;
set_top(info, top);
}
/* While renaming, there is a point where we want the path from dentry,
@@ -58,8 +55,8 @@ void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
const struct qstr *name)
{
struct sdcardfs_inode_info *info = SDCARDFS_I(d_inode(dentry));
struct sdcardfs_inode_data *parent_data =
SDCARDFS_I(d_inode(parent))->data;
struct sdcardfs_inode_info *parent_info = SDCARDFS_I(d_inode(parent));
struct sdcardfs_inode_data *parent_data = parent_info->data;
appid_t appid;
unsigned long user_num;
int err;
@@ -80,13 +77,15 @@ void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
inherit_derived_state(d_inode(parent), d_inode(dentry));
/* Files don't get special labels */
if (!S_ISDIR(d_inode(dentry)->i_mode))
if (!S_ISDIR(d_inode(dentry)->i_mode)) {
set_top(info, parent_info);
return;
}
/* Derive custom permissions based on parent and current node */
switch (parent_data->perm) {
case PERM_INHERIT:
case PERM_ANDROID_PACKAGE_CACHE:
/* Already inherited above */
set_top(info, parent_info);
break;
case PERM_PRE_ROOT:
/* Legacy internal layout places users at top level */
@@ -96,7 +95,6 @@ void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
info->data->userid = 0;
else
info->data->userid = user_num;
set_top(info, info->data);
break;
case PERM_ROOT:
/* Assume masked off by default. */
@@ -104,24 +102,24 @@ void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
/* App-specific directories inside; let anyone traverse */
info->data->perm = PERM_ANDROID;
info->data->under_android = true;
set_top(info, info->data);
} else {
set_top(info, parent_info);
}
break;
case PERM_ANDROID:
if (qstr_case_eq(name, &q_data)) {
/* App-specific directories inside; let anyone traverse */
info->data->perm = PERM_ANDROID_DATA;
set_top(info, info->data);
} else if (qstr_case_eq(name, &q_obb)) {
/* App-specific directories inside; let anyone traverse */
info->data->perm = PERM_ANDROID_OBB;
info->data->under_obb = true;
set_top(info, info->data);
/* Single OBB directory is always shared */
} else if (qstr_case_eq(name, &q_media)) {
/* App-specific directories inside; let anyone traverse */
info->data->perm = PERM_ANDROID_MEDIA;
set_top(info, info->data);
} else {
set_top(info, parent_info);
}
break;
case PERM_ANDROID_OBB:
@@ -132,13 +130,13 @@ void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
if (appid != 0 && !is_excluded(name->name, parent_data->userid))
info->data->d_uid =
multiuser_get_uid(parent_data->userid, appid);
set_top(info, info->data);
break;
case PERM_ANDROID_PACKAGE:
if (qstr_case_eq(name, &q_cache)) {
info->data->perm = PERM_ANDROID_PACKAGE_CACHE;
info->data->under_cache = true;
}
set_top(info, parent_info);
break;
}
}

@@ -18,7 +18,6 @@
* General Public License.
*/
#include <linux/fsnotify.h>
#include "sdcardfs.h"
#ifdef CONFIG_SDCARD_FS_FADV_NOACTIVE
#include <linux/backing-dev.h>
@@ -260,7 +259,6 @@ static int sdcardfs_open(struct inode *inode, struct file *file)
fput(lower_file); /* fput calls dput for lower_dentry */
}
} else {
fsnotify_open(lower_file);
sdcardfs_set_lower_file(file, lower_file);
}

@@ -816,8 +816,8 @@ out_err:
return err;
}
static int sdcardfs_fillattr(struct vfsmount *mnt,
struct inode *inode, struct kstat *stat)
static int sdcardfs_fillattr(struct vfsmount *mnt, struct inode *inode,
struct kstat *lower_stat, struct kstat *stat)
{
struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
struct sdcardfs_inode_data *top = top_data_get(info);
@@ -833,12 +833,12 @@ static int sdcardfs_fillattr(struct vfsmount *mnt,
stat->uid = make_kuid(&init_user_ns, top->d_uid);
stat->gid = make_kgid(&init_user_ns, get_gid(mnt, sb, top));
stat->rdev = inode->i_rdev;
stat->size = i_size_read(inode);
stat->atime = inode->i_atime;
stat->mtime = inode->i_mtime;
stat->ctime = inode->i_ctime;
stat->blksize = (1 << inode->i_blkbits);
stat->blocks = inode->i_blocks;
stat->size = lower_stat->size;
stat->atime = lower_stat->atime;
stat->mtime = lower_stat->mtime;
stat->ctime = lower_stat->ctime;
stat->blksize = lower_stat->blksize;
stat->blocks = lower_stat->blocks;
data_put(top);
return 0;
}
@@ -864,8 +864,7 @@ static int sdcardfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
goto out;
sdcardfs_copy_and_fix_attrs(d_inode(dentry),
d_inode(lower_path.dentry));
err = sdcardfs_fillattr(mnt, d_inode(dentry), stat);
stat->blocks = lower_stat.blocks;
err = sdcardfs_fillattr(mnt, d_inode(dentry), &lower_stat, stat);
out:
sdcardfs_put_lower_path(dentry, &lower_path);
return err;

@@ -341,13 +341,11 @@ static int sdcardfs_read_super(struct vfsmount *mnt, struct super_block *sb,
mutex_lock(&sdcardfs_super_list_lock);
if (sb_info->options.multiuser) {
setup_derived_state(d_inode(sb->s_root), PERM_PRE_ROOT,
sb_info->options.fs_user_id, AID_ROOT,
false, SDCARDFS_I(d_inode(sb->s_root))->data);
sb_info->options.fs_user_id, AID_ROOT);
snprintf(sb_info->obbpath_s, PATH_MAX, "%s/obb", dev_name);
} else {
setup_derived_state(d_inode(sb->s_root), PERM_ROOT,
sb_info->options.fs_user_id, AID_ROOT,
false, SDCARDFS_I(d_inode(sb->s_root))->data);
sb_info->options.fs_user_id, AID_ROOT);
snprintf(sb_info->obbpath_s, PATH_MAX, "%s/Android/obb", dev_name);
}
fixup_tmp_permissions(d_inode(sb->s_root));

@@ -201,6 +201,7 @@ struct sdcardfs_inode_info {
struct sdcardfs_inode_data *data;
/* top folder for ownership */
spinlock_t top_lock;
struct sdcardfs_inode_data *top_data;
struct inode vfs_inode;
@@ -380,7 +381,12 @@ static inline struct sdcardfs_inode_data *data_get(
static inline struct sdcardfs_inode_data *top_data_get(
struct sdcardfs_inode_info *info)
{
return data_get(info->top_data);
struct sdcardfs_inode_data *top_data;
spin_lock(&info->top_lock);
top_data = data_get(info->top_data);
spin_unlock(&info->top_lock);
return top_data;
}
extern void data_release(struct kref *ref);
@@ -402,15 +408,20 @@ static inline void release_own_data(struct sdcardfs_inode_info *info)
}
static inline void set_top(struct sdcardfs_inode_info *info,
struct sdcardfs_inode_data *top)
struct sdcardfs_inode_info *top_owner)
{
struct sdcardfs_inode_data *old_top = info->top_data;
struct sdcardfs_inode_data *old_top;
struct sdcardfs_inode_data *new_top = NULL;
if (top)
data_get(top);
info->top_data = top;
if (top_owner)
new_top = top_data_get(top_owner);
spin_lock(&info->top_lock);
old_top = info->top_data;
info->top_data = new_top;
if (old_top)
data_put(old_top);
spin_unlock(&info->top_lock);
}
static inline int get_gid(struct vfsmount *mnt,
@@ -516,8 +527,7 @@ struct limit_search {
};
extern void setup_derived_state(struct inode *inode, perm_t perm,
userid_t userid, uid_t uid, bool under_android,
struct sdcardfs_inode_data *top);
userid_t userid, uid_t uid);
extern void get_derived_permission(struct dentry *parent, struct dentry *dentry);
extern void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, const struct qstr *name);
extern void fixup_perms_recursive(struct dentry *dentry, struct limit_search *limit);

@@ -215,6 +215,9 @@ static struct inode *sdcardfs_alloc_inode(struct super_block *sb)
i->data = d;
kref_init(&d->refcount);
i->top_data = d;
spin_lock_init(&i->top_lock);
kref_get(&d->refcount);
i->vfs_inode.i_version = 1;
return &i->vfs_inode;

@@ -310,7 +310,7 @@
(ip->i_df.if_flags & XFS_IFEXTENTS));
ASSERT(offset <= mp->m_super->s_maxbytes);
if (offset + count > mp->m_super->s_maxbytes)
if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
count = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -1360,7 +1360,7 @@ xfs_map_trim_size(
if (mapping_size > size)
mapping_size = size;
if (offset < i_size_read(inode) &&
offset + mapping_size >= i_size_read(inode)) {
(xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
/* limit mapping to block that spans EOF */
mapping_size = roundup_64(i_size_read(inode) - offset,
i_blocksize(inode));
@@ -1416,7 +1416,7 @@ __xfs_get_blocks(
}
ASSERT(offset <= mp->m_super->s_maxbytes);
if (offset + size > mp->m_super->s_maxbytes)
if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
size = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
offset_fsb = XFS_B_TO_FSBT(mp, offset);

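All three hunks cast the signed loff_t to xfs_ufsize_t (an unsigned 64-bit type) before the addition: with offset close to s_maxbytes, the signed offset + count can overflow, which is undefined behavior and exactly what UBSAN reported, whereas the unsigned sum is well defined and the comparison still trims correctly. In miniature:

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    int main(void)
    {
        int64_t maxbytes = LLONG_MAX & ~0xfffLL;    /* page-aligned cap */
        int64_t offset = maxbytes - 512;            /* near the limit */
        uint64_t count = 4096;

        /* offset + count as int64_t would be signed overflow (UB);
         * doing the math unsigned keeps the check well defined */
        if ((uint64_t)offset + count > (uint64_t)maxbytes)
            count = (uint64_t)(maxbytes - offset);

        printf("trimmed count: %llu\n", (unsigned long long)count);
        return 0;
    }
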
@@ -31,17 +31,25 @@ struct bpf_map_ops {
};
struct bpf_map {
atomic_t refcnt;
/* 1st cacheline with read-mostly members of which some
* are also accessed in fast-path (e.g. ops, max_entries).
*/
const struct bpf_map_ops *ops ____cacheline_aligned;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 pages;
bool unpriv_array;
struct user_struct *user;
const struct bpf_map_ops *ops;
struct work_struct work;
/* 7 bytes hole */
/* 2nd cacheline with misc members to avoid false sharing
* particularly with refcounting.
*/
struct user_struct *user ____cacheline_aligned;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
};
struct bpf_map_type_list {

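The reshuffle keeps the read-mostly fields (ops, max_entries and friends) in the first cache line and moves the heavily written refcnt/usercnt into the second, so a lookup that only loads max_entries no longer shares a line with the counter modified on every map get/put. offsetof() makes the split visible; a sketch with the same annotation (____cacheline_aligned reduced to a plain alignment attribute, 64-byte lines assumed):

    #include <stdio.h>
    #include <stddef.h>

    #define CACHELINE 64
    #define cacheline_aligned __attribute__((aligned(CACHELINE)))

    struct demo_map {
        /* read-mostly: the fast path only loads these */
        const void *ops cacheline_aligned;
        unsigned int key_size;
        unsigned int value_size;
        unsigned int max_entries;
        /* write-hot: refcounting bounces this line instead */
        int refcnt cacheline_aligned;
        int usercnt;
    };

    int main(void)
    {
        printf("max_entries in cache line %zu, refcnt in cache line %zu\n",
               offsetof(struct demo_map, max_entries) / CACHELINE,
               offsetof(struct demo_map, refcnt) / CACHELINE);
        return 0;
    }
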
@@ -230,12 +230,19 @@ static inline void fsnotify_modify(struct file *file)
static inline void fsnotify_open(struct file *file)
{
struct path *path = &file->f_path;
struct path lower_path;
struct inode *inode = file_inode(file);
__u32 mask = FS_OPEN;
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
if (path->dentry->d_op && path->dentry->d_op->d_canonical_path) {
path->dentry->d_op->d_canonical_path(path, &lower_path);
fsnotify_parent(&lower_path, NULL, mask);
fsnotify(lower_path.dentry->d_inode, mask, &lower_path, FSNOTIFY_EVENT_PATH, NULL, 0);
path_put(&lower_path);
}
fsnotify_parent(path, NULL, mask);
fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}

@@ -1517,6 +1517,10 @@ struct sched_rt_entity {
unsigned short on_rq;
unsigned short on_list;
/* Accesses for these must be guarded by rq->lock of the task's rq */
bool schedtune_enqueued;
struct hrtimer schedtune_timer;
struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
struct sched_rt_entity *parent;

@@ -1674,6 +1674,13 @@ config BPF_SYSCALL
Enable the bpf() system call that allows to manipulate eBPF
programs and maps via file descriptors.
config BPF_JIT_ALWAYS_ON
bool "Permanently enable BPF JIT and remove BPF interpreter"
depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
help
Enables BPF JIT and removes BPF interpreter to avoid
speculative execution of BPF instructions by the interpreter
config SHMEM
bool "Use full shmem filesystem" if EXPERT
default y

@@ -256,6 +256,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
* __bpf_prog_run - run eBPF program on a given context
* @ctx: is the data we are operating on
@@ -443,7 +444,7 @@ select_insn:
DST = tmp;
CONT;
ALU_MOD_X:
if (unlikely(SRC == 0))
if (unlikely((u32)SRC == 0))
return 0;
tmp = (u32) DST;
DST = do_div(tmp, (u32) SRC);
@@ -462,7 +463,7 @@ select_insn:
DST = div64_u64(DST, SRC);
CONT;
ALU_DIV_X:
if (unlikely(SRC == 0))
if (unlikely((u32)SRC == 0))
return 0;
tmp = (u32) DST;
do_div(tmp, (u32) SRC);
@@ -517,7 +518,7 @@ select_insn:
struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
struct bpf_array *array = container_of(map, struct bpf_array, map);
struct bpf_prog *prog;
u64 index = BPF_R3;
u32 index = BPF_R3;
if (unlikely(index >= array->map.max_entries))
goto out;
@@ -725,6 +726,13 @@ load_byte:
return 0;
}
#else
static unsigned int __bpf_prog_ret0(void *ctx, const struct bpf_insn *insn)
{
return 0;
}
#endif
bool bpf_prog_array_compatible(struct bpf_array *array,
const struct bpf_prog *fp)
{
@@ -771,9 +779,23 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
*/
int bpf_prog_select_runtime(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
fp->bpf_func = (void *) __bpf_prog_run;
#else
fp->bpf_func = (void *) __bpf_prog_ret0;
#endif
/* eBPF JITs can rewrite the program in case constant
* blinding is active. However, in case of error during
* blinding, bpf_int_jit_compile() must always return a
* valid program, which in this case would simply not
* be JITed, but falls back to the interpreter.
*/
bpf_int_jit_compile(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
if (!fp->jited)
return -ENOTSUPP;
#endif
bpf_prog_lock_ro(fp);
/* The tail call compatibility check can only be done at

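The (u32) casts in the ALU_MOD_X/ALU_DIV_X guards matter because 32-bit BPF division only consumes the low half of the source register: a divisor such as 1ULL << 32 is nonzero as a u64 yet zero as a u32, so the old check passed and do_div() divided by zero. In miniature:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t dst = 100, src = 1ULL << 32;

        printf("64-bit guard sees nonzero: %d\n", src != 0);
        printf("32-bit divisor is:         %u\n", (uint32_t)src);

        /* the patched interpreter bails out here instead of dividing */
        if ((uint32_t)src == 0)
            printf("ALU32 div rejected, program returns 0\n");
        else
            printf("quotient: %u\n", (uint32_t)dst / (uint32_t)src);
        return 0;
    }
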
@@ -191,6 +191,7 @@ struct bpf_insn_aux_data {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
};
bool seen; /* this insn was processed by the verifier */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -682,6 +683,13 @@ static bool is_pointer_value(struct verifier_env *env, int regno)
}
}
static bool is_ctx_reg(struct verifier_env *env, int regno)
{
const struct reg_state *reg = &env->cur_state.regs[regno];
return reg->type == PTR_TO_CTX;
}
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
@@ -778,6 +786,12 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
return -EACCES;
}
if (is_ctx_reg(env, insn->dst_reg)) {
verbose("BPF_XADD stores into R%d context is not allowed\n",
insn->dst_reg);
return -EACCES;
}
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);
@@ -1164,6 +1178,11 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
verbose("BPF_ARSH not supported for 32 bit ALU\n");
return -EINVAL;
}
if ((opcode == BPF_LSH || opcode == BPF_RSH ||
opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
@@ -1793,6 +1812,7 @@ static int do_check(struct verifier_env *env)
print_bpf_insn(env, insn);
}
env->insn_aux_data[insn_idx].seen = true;
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
@@ -1902,6 +1922,12 @@ static int do_check(struct verifier_env *env)
if (err)
return err;
if (is_ctx_reg(env, insn->dst_reg)) {
verbose("BPF_ST stores into R%d context is not allowed\n",
insn->dst_reg);
return -EACCES;
}
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
@@ -1988,6 +2014,7 @@ process_bpf_exit:
return err;
insn_idx++;
env->insn_aux_data[insn_idx].seen = true;
} else {
verbose("invalid BPF_LD mode\n");
return -EINVAL;
@@ -2125,6 +2152,7 @@ static int adjust_insn_aux_data(struct verifier_env *env, u32 prog_len,
u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
int i;
if (cnt == 1)
return 0;
@@ -2134,6 +2162,8 @@ static int adjust_insn_aux_data(struct verifier_env *env, u32 prog_len,
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
for (i = off; i < off + cnt - 1; i++)
new_data[i].seen = true;
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
@@ -2152,6 +2182,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct verifier_env *env, u32 off,
return new_prog;
}
/* The verifier does more data flow analysis than llvm and will not explore
* branches that are dead at run time. Malicious programs can have dead code
* too. Therefore replace all dead at-run-time code with nops.
*/
static void sanitize_dead_code(struct verifier_env *env)
{
struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
struct bpf_insn *insn = env->prog->insnsi;
const int insn_cnt = env->prog->len;
int i;
for (i = 0; i < insn_cnt; i++) {
if (aux_data[i].seen)
continue;
memcpy(insn + i, &nop, sizeof(nop));
}
}
/* convert load instructions that access fields of 'struct __sk_buff'
* into sequence of instructions that access fields of 'struct sk_buff'
*/
@@ -2218,6 +2267,24 @@ static int fixup_bpf_calls(struct verifier_env *env)
int i, cnt, delta = 0;
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
/* due to JIT bugs clear upper 32-bits of src register
* before div/mod operation
*/
insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
insn_buf[1] = *insn;
cnt = 2;
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
@@ -2370,6 +2437,9 @@ skip_full_check:
while (pop_stack(env, NULL) >= 0);
free_states(env);
if (ret == 0)
sanitize_dead_code(env);
if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);

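sanitize_dead_code() builds on the seen bits recorded during do_check(): any instruction the verifier never visited is unreachable at run time, and rewriting it as the canonical no-op mov r0,r0 keeps malicious dead code from ever being executed. A toy sweep over an invented instruction encoding:

    #include <stdio.h>
    #include <stdbool.h>
    #include <string.h>

    struct insn { char op[16]; };   /* toy encoding for the demo */

    static void sanitize_dead_code(struct insn *prog, const bool *seen, int cnt)
    {
        const struct insn nop = { "mov r0, r0" };   /* same trick as the patch */

        for (int i = 0; i < cnt; i++)
            if (!seen[i])
                memcpy(&prog[i], &nop, sizeof(nop));
    }

    int main(void)
    {
        struct insn prog[] = { {"r0 = 1"}, {"goto +1"}, {"r0 = *ctx"}, {"exit"} };
        bool seen[] = { true, true, false, true };  /* insn 2 unreachable */

        sanitize_dead_code(prog, seen, 4);
        for (int i = 0; i < 4; i++)
            printf("%d: %s\n", i, prog[i].op);
        return 0;
    }
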
@@ -2368,6 +2368,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
init_dl_task_timer(&p->dl);
__dl_clear_params(p);
init_rt_schedtune_timer(&p->rt);
INIT_LIST_HEAD(&p->rt.run_list);
p->rt.timeout = 0;
p->rt.time_slice = sched_rr_timeslice;

@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <trace/events/sched.h>
#include <linux/hrtimer.h>
#include "tune.h"
@@ -980,6 +981,70 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
return 0;
}
#define RT_SCHEDTUNE_INTERVAL 50000000ULL
static enum hrtimer_restart rt_schedtune_timer(struct hrtimer *timer)
{
struct sched_rt_entity *rt_se = container_of(timer,
struct sched_rt_entity,
schedtune_timer);
struct task_struct *p = rt_task_of(rt_se);
struct rq *rq = task_rq(p);
raw_spin_lock(&rq->lock);
/*
* Nothing to do if:
* - task has switched runqueues
* - task isn't RT anymore
*/
if (rq != task_rq(p) || (p->sched_class != &rt_sched_class))
goto out;
/*
* If task got enqueued back during callback time, it means we raced
* with the enqueue on another cpu, that's Ok, just do nothing as
* enqueue path would have tried to cancel us and we shouldn't run
* Also check the schedtune_enqueued flag as class-switch on a
* sleeping task may have already canceled the timer and done dq
*/
if (p->on_rq || !rt_se->schedtune_enqueued)
goto out;
/*
* RT task is no longer active, cancel boost
*/
rt_se->schedtune_enqueued = false;
schedtune_dequeue_task(p, cpu_of(rq));
cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
out:
raw_spin_unlock(&rq->lock);
/*
* This can free the task_struct if no more references.
*/
put_task_struct(p);
return HRTIMER_NORESTART;
}
void init_rt_schedtune_timer(struct sched_rt_entity *rt_se)
{
struct hrtimer *timer = &rt_se->schedtune_timer;
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer->function = rt_schedtune_timer;
rt_se->schedtune_enqueued = false;
}
static void start_schedtune_timer(struct sched_rt_entity *rt_se)
{
struct hrtimer *timer = &rt_se->schedtune_timer;
hrtimer_start(timer, ns_to_ktime(RT_SCHEDTUNE_INTERVAL),
HRTIMER_MODE_REL_PINNED);
}
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
@@ -1386,7 +1451,32 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
if (!schedtune_task_boost(p))
return;
/*
* If the schedtune timer is active, a boost was already done; just
* cancel the timer so that the deboost doesn't happen. Otherwise,
* apply the boost. If a pending timer was cancelled, put the task
* reference.
*/
if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
put_task_struct(p);
/*
* schedtune_enqueued can be true here if enqueue_task_rt grabbed the
* rq lock before the timer fired, or before its callback could take
* the rq lock. It can be false if the timer callback is running and
* has just released the rq lock, or if the timer already finished
* and cancelled the boost.
*/
if (rt_se->schedtune_enqueued)
return;
rt_se->schedtune_enqueued = true;
schedtune_enqueue_task(p, cpu_of(rq));
cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1398,7 +1488,19 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
dec_hmp_sched_stats_rt(rq, p);
dequeue_pushable_task(rq, p);
if (!rt_se->schedtune_enqueued)
return;
if (flags == DEQUEUE_SLEEP) {
get_task_struct(p);
start_schedtune_timer(rt_se);
return;
}
rt_se->schedtune_enqueued = false;
schedtune_dequeue_task(p, cpu_of(rq));
cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}
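
Taken together, the callback, enqueue and dequeue paths above implement one ownership rule for the task reference: dequeue-for-sleep takes a reference when it arms the deboost timer, and whoever removes the pending timer is responsible for dropping it. Condensed, using the functions from this patch:

    /* dequeue for sleep: keep the boost, pin the task, arm the timer */
    get_task_struct(p);
    start_schedtune_timer(rt_se);

    /*
     * Exactly one party then drops that reference:
     *  - the timer fires: rt_schedtune_timer() deboosts and ends with
     *    put_task_struct(p), or
     *  - an enqueue or class switch gets there first:
     *    hrtimer_try_to_cancel() returns 1 only if the timer was still
     *    pending, and only then is the reference dropped here.
     */
    if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
            put_task_struct(p);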
/*
@@ -1472,6 +1574,32 @@ task_may_not_preempt(struct task_struct *task, int cpu)
task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
}
/*
* Perform a schedtune dequeue and cancellation of boost timers if
* needed. Should be called only with the rq->lock held.
*/
static void schedtune_dequeue_rt(struct rq *rq, struct task_struct *p)
{
struct sched_rt_entity *rt_se = &p->rt;
BUG_ON(!raw_spin_is_locked(&rq->lock));
if (!rt_se->schedtune_enqueued)
return;
/*
* In case of a class change, cancel any active timers. If an enqueued
* timer was cancelled, put the task reference.
*/
if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
put_task_struct(p);
/* schedtune_enqueued is true, deboost it */
rt_se->schedtune_enqueued = false;
schedtune_dequeue_task(p, task_cpu(p));
cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}
static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
int sibling_count_hint)
@@ -1546,6 +1674,19 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
rcu_read_unlock();
out:
/*
* If previous CPU was different, make sure to cancel any active
* schedtune timers and deboost.
*/
if (task_cpu(p) != cpu) {
unsigned long fl;
struct rq *prq = task_rq(p);
raw_spin_lock_irqsave(&prq->lock, fl);
schedtune_dequeue_rt(prq, p);
raw_spin_unlock_irqrestore(&prq->lock, fl);
}
return cpu;
}
@@ -2401,6 +2542,13 @@ static void rq_offline_rt(struct rq *rq)
*/
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
/*
* On a class switch away from RT, always cancel active schedtune
* timers; this handles the case where we switch class for a task
* that is already rt-dequeued but still has a running timer.
*/
schedtune_dequeue_rt(rq, p);
/*
* If there are other RT tasks then we will reschedule
* and the scheduling of the other RT tasks will handle


@@ -2199,6 +2199,7 @@ extern void resched_cpu(int cpu);
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
extern void init_rt_schedtune_timer(struct sched_rt_entity *rt_se);
extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);


@@ -5304,9 +5304,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
return NULL;
}
}
-/* We don't expect to fail. */
if (*err) {
-pr_cont("FAIL to attach err=%d len=%d\n",
+pr_cont("FAIL to prog_create err=%d len=%d\n",
*err, fprog.len);
return NULL;
}
@@ -5325,7 +5324,11 @@ static struct bpf_prog *generate_filter(int which, int *err)
fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
-bpf_prog_select_runtime(fp);
+*err = bpf_prog_select_runtime(fp);
+if (*err) {
+pr_cont("FAIL to select_runtime err=%d\n", *err);
+return NULL;
+}
break;
}
@@ -5511,8 +5514,8 @@ static __init int test_bpf(void)
pass_cnt++;
continue;
}
-return err;
+err_cnt++;
+continue;
}
pr_cont("jited:%u ", fp->jited);


@@ -1403,6 +1403,8 @@ static void kmemleak_scan(void)
if (page_count(page) == 0)
continue;
scan_block(page, page + 1, NULL);
if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
cond_resched();
}
}
put_online_mems();
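
The kmemleak change is an instance of a general rule: a kernel loop that can iterate over millions of pages must offer the scheduler a preemption point at some coarse stride. A sketch of the pattern, with an illustrative stride and a hypothetical per-page helper:

    /* Yield periodically inside a potentially huge loop; the stride is
     * arbitrary, chosen so the modulo check itself stays cheap. */
    for (pfn = start_pfn; pfn < end_pfn; pfn++) {
            scan_one_page(pfn);        /* hypothetical per-page work */
            if (!(pfn % 4096))
                    cond_resched();
    }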


@@ -416,3 +416,6 @@ endif # if NET
# Used by archs to tell that they support BPF_JIT
config HAVE_BPF_JIT
bool
config HAVE_EBPF_JIT
bool


@@ -430,6 +430,10 @@ do_pass:
convert_bpf_extensions(fp, &insn))
break;
if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
fp->code == (BPF_ALU | BPF_MOD | BPF_X))
*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
break;
@@ -984,7 +988,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
*/
goto out_err_free;
-bpf_prog_select_runtime(fp);
+err = bpf_prog_select_runtime(fp);
+if (err)
+goto out_err_free;
kfree(old_prog);
return fp;
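
Both this call site and the lib/test_bpf.c ones change for the same reason: once CONFIG_BPF_JIT_ALWAYS_ON can build the kernel without the eBPF interpreter, bpf_prog_select_runtime() becomes fallible (if the JIT cannot translate the program, there is nothing to fall back to), so callers must propagate its return value instead of ignoring it. Sketched:

    /* The cleanup on failure is whatever the caller's existing
     * error path already does; only the propagation is new. */
    err = bpf_prog_select_runtime(fp);
    if (err)
            goto out_err_free;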


@@ -292,7 +292,13 @@ static struct ctl_table net_core_table[] = {
.data = &bpf_jit_enable,
.maxlen = sizeof(int),
.mode = 0644,
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
.proc_handler = proc_dointvec
#else
.proc_handler = proc_dointvec_minmax,
.extra1 = &one,
.extra2 = &one,
#endif
},
#endif
{


@@ -776,7 +776,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
struct mesh_path *mpath;
u8 ttl, flags, hopcount;
const u8 *orig_addr;
-u32 orig_sn, metric, metric_txsta, interval;
+u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
bool root_is_gate;
ttl = rann->rann_ttl;
@@ -787,7 +787,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
interval = le32_to_cpu(rann->rann_interval);
hopcount = rann->rann_hopcount;
hopcount++;
-metric = le32_to_cpu(rann->rann_metric);
+orig_metric = le32_to_cpu(rann->rann_metric);
/* Ignore our own RANNs */
if (ether_addr_equal(orig_addr, sdata->vif.addr))
@@ -804,7 +804,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
return;
}
-metric_txsta = airtime_link_metric_get(local, sta);
+last_hop_metric = airtime_link_metric_get(local, sta);
+new_metric = orig_metric + last_hop_metric;
+if (new_metric < orig_metric)
+new_metric = MAX_METRIC;
mpath = mesh_path_lookup(sdata, orig_addr);
if (!mpath) {
@@ -817,7 +820,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
}
if (!(SN_LT(mpath->sn, orig_sn)) &&
-!(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
+!(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
rcu_read_unlock();
return;
}
@@ -835,7 +838,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
}
mpath->sn = orig_sn;
-mpath->rann_metric = metric + metric_txsta;
+mpath->rann_metric = new_metric;
mpath->is_root = true;
/* Record the RANN sender's address to send individually
* addressed PREQs destined for the root mesh STA */
@@ -855,7 +858,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
orig_sn, 0, NULL, 0, broadcast_addr,
hopcount, ttl, interval,
-metric + metric_txsta, 0, sdata);
+new_metric, 0, sdata);
}
rcu_read_unlock();
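
The core of this fix is the unsigned-overflow clamp: in u32 arithmetic a wrapped sum is detectable because the result is smaller than either operand. A worked example with illustrative values:

    u32 orig_metric     = 0xfffffff0;   /* metric accumulated so far */
    u32 last_hop_metric = 0x20;
    u32 new_metric      = orig_metric + last_hop_metric; /* wraps to 0x10 */

    if (new_metric < orig_metric)       /* wrap detected */
            new_metric = MAX_METRIC;    /* saturate instead */

Without the clamp, the wrapped-around (tiny) metric would make an absurdly long path look like the best route to the root.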


@@ -1672,14 +1672,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
#define MAX_ACTIONS_BUFSIZE (32 * 1024)
-static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
+static struct sw_flow_actions *nla_alloc_flow_actions(int size)
{
struct sw_flow_actions *sfa;
-if (size > MAX_ACTIONS_BUFSIZE) {
-OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
-return ERR_PTR(-EINVAL);
-}
+WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
if (!sfa)
@@ -1752,12 +1749,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
new_acts_size = ksize(*sfa) * 2;
if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
-if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
+if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
+OVS_NLERR(log, "Flow action size exceeds max %u",
+MAX_ACTIONS_BUFSIZE);
return ERR_PTR(-EMSGSIZE);
+}
new_acts_size = MAX_ACTIONS_BUFSIZE;
}
-acts = nla_alloc_flow_actions(new_acts_size, log);
+acts = nla_alloc_flow_actions(new_acts_size);
if (IS_ERR(acts))
return (void *)acts;
@@ -2369,7 +2369,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
{
int err;
-*sfa = nla_alloc_flow_actions(nla_len(attr), log);
+*sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
if (IS_ERR(*sfa))
return PTR_ERR(*sfa);
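
The error handling has moved rather than disappeared: the copy entry point clamps its initial request with min(), reserve_sfa_size() is now the one place that turns an oversized action list into -EMSGSIZE, and the allocator keeps only a WARN_ON_ONCE() sanity check, since after these changes no caller can hand it an oversized value. The resulting contract:

    /* entry point: never request more than the cap */
    *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));

    /* growth path: the single place an over-limit request becomes an error */
    if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
            return ERR_PTR(-EMSGSIZE);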


@@ -2594,6 +2594,15 @@ out_fs:
core_initcall(sock_init); /* early initcall */
static int __init jit_init(void)
{
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
bpf_jit_enable = 1;
#endif
return 0;
}
pure_initcall(jit_init);
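
Together with the sysctl hunk earlier, this completes the userspace-visible behavior of CONFIG_BPF_JIT_ALWAYS_ON: the JIT starts enabled, and proc_dointvec_minmax with extra1 == extra2 == &one pins the only accepted value at 1. Assuming that config option is set, a small demo of the expected interaction (behavior as described above, not taken from this patch):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/proc/sys/net/core/bpf_jit_enable", O_WRONLY);

            if (fd < 0)
                    return 1;
            /* Any value other than "1" is rejected with EINVAL. */
            if (write(fd, "0", 1) < 0 && errno == EINVAL)
                    puts("JIT can no longer be disabled at runtime");
            close(fd);
            return 0;
    }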
#ifdef CONFIG_PROC_FS
void socket_seq_show(struct seq_file *seq)
{


@@ -2360,6 +2360,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
case -ECONNREFUSED:
case -ECONNRESET:
case -ENETUNREACH:
case -EHOSTUNREACH:
case -EADDRINUSE:
case -ENOBUFS:
/* retry with existing socket, after a delay */


@@ -4067,6 +4067,8 @@ static int sock_has_perm(struct task_struct *task, struct sock *sk, u32 perms)
struct lsm_network_audit net = {0,};
u32 tsid = task_sid(task);
if (!sksec)
return -EFAULT;
if (sksec->sid == SECINITSID_KERNEL)
return 0;


@@ -236,6 +236,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
rwlock_init(&client->ports_lock);
mutex_init(&client->ports_mutex);
INIT_LIST_HEAD(&client->ports_list_head);
mutex_init(&client->ioctl_mutex);
/* find free slot in the client table */
spin_lock_irqsave(&clients_lock, flags);
@@ -2195,6 +2196,7 @@ static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd,
void __user *arg)
{
struct seq_ioctl_table *p;
int ret;
switch (cmd) {
case SNDRV_SEQ_IOCTL_PVERSION:
@@ -2208,8 +2210,12 @@ static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd,
if (! arg)
return -EFAULT;
for (p = ioctl_tables; p->cmd; p++) {
if (p->cmd == cmd)
return p->func(client, arg);
if (p->cmd == cmd) {
mutex_lock(&client->ioctl_mutex);
ret = p->func(client, arg);
mutex_unlock(&client->ioctl_mutex);
return ret;
}
}
pr_debug("ALSA: seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n",
cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
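
The new per-client mutex makes each table-driven ioctl handler atomic with respect to the others on the same sequencer client, closing races where two threads sharing a client (for example, one resizing a pool while another uses it) could interleave. A per-client lock, rather than a global one, also keeps independent clients from contending:

    /* every table-driven handler for this client runs under one mutex */
    mutex_lock(&client->ioctl_mutex);
    ret = p->func(client, arg);
    mutex_unlock(&client->ioctl_mutex);
    return ret;

SNDRV_SEQ_IOCTL_PVERSION is handled before the table walk and stays lockless; it only reads a constant.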


@@ -59,6 +59,7 @@ struct snd_seq_client {
struct list_head ports_list_head;
rwlock_t ports_lock;
struct mutex ports_mutex;
struct mutex ioctl_mutex;
int convert32; /* convert 32->64bit */
/* output pool */


@@ -144,6 +144,7 @@ static int bind_device(char *busid)
int rc;
struct udev *udev;
struct udev_device *dev;
const char *devpath;
/* Check whether the device with this bus ID exists. */
udev = udev_new();
@@ -152,8 +153,16 @@ static int bind_device(char *busid)
err("device with the specified bus ID does not exist");
return -1;
}
devpath = udev_device_get_devpath(dev);
udev_unref(udev);
/* If the device is already attached to vhci_hcd - bail out */
if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
err("bind loop detected: device: %s is attached to %s\n",
devpath, USBIP_VHCI_DRV_NAME);
return -1;
}
rc = unbind_other(busid);
if (rc == UNBIND_ST_FAILED) {
err("could not unbind driver from device on busid %s", busid);


@@ -180,6 +180,7 @@ static int list_devices(bool parsable)
const char *busid;
char product_name[128];
int ret = -1;
const char *devpath;
/* Create libudev context. */
udev = udev_new();
@@ -202,6 +203,14 @@ static int list_devices(bool parsable)
path = udev_list_entry_get_name(dev_list_entry);
dev = udev_device_new_from_syspath(udev, path);
/* Ignore devices attached to vhci_hcd */
devpath = udev_device_get_devpath(dev);
if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
dbg("Skip the device %s already attached to %s\n",
devpath, USBIP_VHCI_DRV_NAME);
continue;
}
/* Get device information. */
idVendor = udev_device_get_sysattr_value(dev, "idVendor");
idProduct = udev_device_get_sysattr_value(dev, "idProduct");