Merge branch 'android-4.4@9bc4622' into branch 'msm-4.4'

* refs/heads/tmp-9bc4622:
  Linux 4.4.70
  drivers: char: mem: Check for address space wraparound with mmap()
  nfsd: encoders mustn't use unitialized values in error cases
  drm/edid: Add 10 bpc quirk for LGD 764 panel in HP zBook 17 G2
  PCI: Freeze PME scan before suspending devices
  PCI: Fix pci_mmap_fits() for HAVE_PCI_RESOURCE_TO_USER platforms
  tracing/kprobes: Enforce kprobes teardown after testing
  osf_wait4(): fix infoleak
  genirq: Fix chained interrupt data ordering
  uwb: fix device quirk on big-endian hosts
  metag/uaccess: Check access_ok in strncpy_from_user
  metag/uaccess: Fix access_ok()
  iommu/vt-d: Flush the IOTLB to get rid of the initial kdump mappings
  staging: rtl8192e: rtl92e_get_eeprom_size Fix read size of EPROM_CMD.
  staging: rtl8192e: fix 2 byte alignment of register BSSIDR.
  mm/huge_memory.c: respect FOLL_FORCE/FOLL_COW for thp
  xc2028: Fix use-after-free bug properly
  arm64: documentation: document tagged pointer stack constraints
  arm64: uaccess: ensure extension of access_ok() addr
  arm64: xchg: hazard against entire exchange variable
  ARM: dts: at91: sama5d3_xplained: not all ADC channels are available
  ARM: dts: at91: sama5d3_xplained: fix ADC vref
  powerpc/64e: Fix hang when debugging programs with relocated kernel
  powerpc/pseries: Fix of_node_put() underflow during DLPAR remove
  powerpc/book3s/mce: Move add_taint() later in virtual mode
  cx231xx-cards: fix NULL-deref at probe
  cx231xx-audio: fix NULL-deref at probe
  cx231xx-audio: fix init error path
  dvb-frontends/cxd2841er: define symbol_rate_min/max in T/C fe-ops
  zr364xx: enforce minimum size when reading header
  dib0700: fix NULL-deref at probe
  s5p-mfc: Fix unbalanced call to clock management
  gspca: konica: add missing endpoint sanity check
  ceph: fix recursion between ceph_set_acl() and __ceph_setattr()
  iio: proximity: as3935: fix as3935_write
  ipx: call ipxitf_put() in ioctl error path
  USB: hub: fix non-SS hub-descriptor handling
  USB: hub: fix SS hub-descriptor handling
  USB: serial: io_ti: fix div-by-zero in set_termios
  USB: serial: mct_u232: fix big-endian baud-rate handling
  USB: serial: qcserial: add more Lenovo EM74xx device IDs
  usb: serial: option: add Telit ME910 support
  USB: iowarrior: fix info ioctl on big-endian hosts
  usb: musb: tusb6010_omap: Do not reset the other direction's packet size
  ttusb2: limit messages to buffer size
  mceusb: fix NULL-deref at probe
  usbvision: fix NULL-deref at probe
  net: irda: irda-usb: fix firmware name on big-endian hosts
  usb: host: xhci-mem: allocate zeroed Scratchpad Buffer
  xhci: apply PME_STUCK_QUIRK and MISSING_CAS quirk for Denverton
  usb: host: xhci-plat: propagate return value of platform_get_irq()
  sched/fair: Initialize throttle_count for new task-groups lazily
  sched/fair: Do not announce throttled next buddy in dequeue_task_fair()
  fscrypt: avoid collisions when presenting long encrypted filenames
  f2fs: check entire encrypted bigname when finding a dentry
  fscrypt: fix context consistency check when key(s) unavailable
  net: qmi_wwan: Add SIMCom 7230E
  ext4 crypto: fix some error handling
  ext4 crypto: don't let data integrity writebacks fail with ENOMEM
  USB: serial: ftdi_sio: add Olimex ARM-USB-TINY(H) PIDs
  USB: serial: ftdi_sio: fix setting latency for unprivileged users
  pid_ns: Fix race between setns'ed fork() and zap_pid_ns_processes()
  pid_ns: Sleep in TASK_INTERRUPTIBLE in zap_pid_ns_processes
  iio: dac: ad7303: fix channel description
  of: fix sparse warning in of_pci_range_parser_one
  proc: Fix unbalanced hard link numbers
  cdc-acm: fix possible invalid access when processing notification
  drm/nouveau/tmr: handle races with hw when updating the next alarm time
  drm/nouveau/tmr: avoid processing completed alarms when adding a new one
  drm/nouveau/tmr: fix corruption of the pending list when rescheduling an alarm
  drm/nouveau/tmr: ack interrupt before processing alarms
  drm/nouveau/therm: remove ineffective workarounds for alarm bugs
  drm/amdgpu: Make display watermark calculations more accurate
  drm/amdgpu: Avoid overflows/divide-by-zero in latency_watermark calculations.
  ath9k_htc: fix NULL-deref at probe
  ath9k_htc: Add support of AirTies 1eda:2315 AR9271 device
  s390/cputime: fix incorrect system time
  s390/kdump: Add final note
  regulator: tps65023: Fix inverted core enable logic.
  KVM: X86: Fix read out-of-bounds vulnerability in kvm pio emulation
  KVM: x86: Fix load damaged SSEx MXCSR register
  ima: accept previously set IMA_NEW_FILE
  mwifiex: pcie: fix cmd_buf use-after-free in remove/reset
  rtlwifi: rtl8821ae: setup 8812ae RFE according to device type
  md: update slab_cache before releasing new stripes when stripes resizing
  dm space map disk: fix some book keeping in the disk space map
  dm thin metadata: call precommit before saving the roots
  dm bufio: make the parameter "retain_bytes" unsigned long
  dm cache metadata: fail operations if fail_io mode has been established
  dm bufio: check new buffer allocation watermark every 30 seconds
  dm bufio: avoid a possible ABBA deadlock
  dm raid: select the Kconfig option CONFIG_MD_RAID0
  dm btree: fix for dm_btree_find_lowest_key()
  infiniband: call ipv6 route lookup via the stub interface
  tpm_crb: check for bad response size
  ARM: tegra: paz00: Mark panel regulator as enabled on boot
  USB: core: replace %p with %pK
  char: lp: fix possible integer overflow in lp_setup()
  watchdog: pcwd_usb: fix NULL-deref at probe
  USB: ene_usb6250: fix DMA to the stack
  usb: misc: legousbtower: Fix memory leak
  usb: misc: legousbtower: Fix buffers on stack
  ANDROID: uid_sys_stats: defer io stats calulation for dead tasks
  ANDROID: AVB: Fix linter errors.
  ANDROID: AVB: Fix invalidate_vbmeta_submit().
  ANDROID: sdcardfs: Check for NULL in revalidate
  Linux 4.4.69
  ipmi: Fix kernel panic at ipmi_ssif_thread()
  wlcore: Add RX_BA_WIN_SIZE_CHANGE_EVENT event
  wlcore: Pass win_size taken from ieee80211_sta to FW
  mac80211: RX BA support for sta max_rx_aggregation_subframes
  mac80211: pass block ack session timeout to to driver
  mac80211: pass RX aggregation window size to driver
  Bluetooth: hci_intel: add missing tty-device sanity check
  Bluetooth: hci_bcm: add missing tty-device sanity check
  Bluetooth: Fix user channel for 32bit userspace on 64bit kernel
  tty: pty: Fix ldisc flush after userspace become aware of the data already
  serial: omap: suspend device on probe errors
  serial: omap: fix runtime-pm handling on unbind
  serial: samsung: Use right device for DMA-mapping calls
  arm64: KVM: Fix decoding of Rt/Rt2 when trapping AArch32 CP accesses
  padata: free correct variable
  CIFS: add misssing SFM mapping for doublequote
  cifs: fix CIFS_IOC_GET_MNT_INFO oops
  CIFS: fix mapping of SFM_SPACE and SFM_PERIOD
  SMB3: Work around mount failure when using SMB3 dialect to Macs
  Set unicode flag on cifs echo request to avoid Mac error
  fs/block_dev: always invalidate cleancache in invalidate_bdev()
  ceph: fix memory leak in __ceph_setxattr()
  fs/xattr.c: zero out memory copied to userspace in getxattr
  ext4: evict inline data when writing to memory map
  IB/mlx4: Reduce SRIOV multicast cleanup warning message to debug level
  IB/mlx4: Fix ib device initialization error flow
  IB/IPoIB: ibX: failed to create mcg debug file
  IB/core: Fix sysfs registration error flow
  vfio/type1: Remove locked page accounting workqueue
  dm era: save spacemap metadata root after the pre-commit
  crypto: algif_aead - Require setkey before accept(2)
  block: fix blk_integrity_register to use template's interval_exp if not 0
  KVM: arm/arm64: fix races in kvm_psci_vcpu_on
  KVM: x86: fix user triggerable warning in kvm_apic_accept_events()
  um: Fix PTRACE_POKEUSER on x86_64
  x86, pmem: Fix cache flushing for iovec write < 8 bytes
  selftests/x86/ldt_gdt_32: Work around a glibc sigaction() bug
  x86/boot: Fix BSS corruption/overwrite bug in early x86 kernel startup
  usb: hub: Do not attempt to autosuspend disconnected devices
  usb: hub: Fix error loop seen after hub communication errors
  usb: Make sure usb/phy/of gets built-in
  usb: misc: add missing continue in switch
  staging: comedi: jr3_pci: cope with jiffies wraparound
  staging: comedi: jr3_pci: fix possible null pointer dereference
  staging: gdm724x: gdm_mux: fix use-after-free on module unload
  staging: vt6656: use off stack for out buffer USB transfers.
  staging: vt6656: use off stack for in buffer USB transfers.
  USB: Proper handling of Race Condition when two USB class drivers try to call init_usb_class simultaneously
  USB: serial: ftdi_sio: add device ID for Microsemi/Arrow SF2PLUS Dev Kit
  usb: host: xhci: print correct command ring address
  iscsi-target: Set session_fall_back_to_erl0 when forcing reinstatement
  target: Convert ACL change queue_depth se_session reference usage
  target/fileio: Fix zero-length READ and WRITE handling
  target: Fix compare_and_write_callback handling for non GOOD status
  xen: adjust early dom0 p2m handling to xen hypervisor behavior
  ANDROID: AVB: Only invalidate vbmeta when told to do so.
  ANDROID: sdcardfs: Move top to its own struct
  ANDROID: lowmemorykiller: account for unevictable pages
  ANDROID: usb: gadget: fix NULL pointer issue in mtp_read()
  ANDROID: usb: f_mtp: return error code if transfer error in receive_file_work function

Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>

Conflicts:
	drivers/usb/gadget/function/f_mtp.c
	fs/ext4/page-io.c
	net/mac80211/agg-rx.c

Change-Id: Id65e75bf3bcee4114eb5d00730a9ef2444ad58eb
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
Commit 2025064255 by Blagovest Kolenichev, 2017-05-31 16:59:54 -07:00
182 changed files with 1877 additions and 944 deletions


@ -11,24 +11,56 @@ in AArch64 Linux.
The kernel configures the translation tables so that translations made
via TTBR0 (i.e. userspace mappings) have the top byte (bits 63:56) of
the virtual address ignored by the translation hardware. This frees up
this byte for application use, with the following caveats:
this byte for application use.
(1) The kernel requires that all user addresses passed to EL1
are tagged with tag 0x00. This means that any syscall
parameters containing user virtual addresses *must* have
their top byte cleared before trapping to the kernel.
(2) Non-zero tags are not preserved when delivering signals.
This means that signal handlers in applications making use
of tags cannot rely on the tag information for user virtual
addresses being maintained for fields inside siginfo_t.
One exception to this rule is for signals raised in response
to watchpoint debug exceptions, where the tag information
will be preserved.
Passing tagged addresses to the kernel
--------------------------------------
(3) Special care should be taken when using tagged pointers,
since it is likely that C compilers will not hazard two
virtual addresses differing only in the upper byte.
All interpretation of userspace memory addresses by the kernel assumes
an address tag of 0x00.
This includes, but is not limited to, addresses found in:
- pointer arguments to system calls, including pointers in structures
passed to system calls,
- the stack pointer (sp), e.g. when interpreting it to deliver a
signal,
- the frame pointer (x29) and frame records, e.g. when interpreting
them to generate a backtrace or call graph.
Using non-zero address tags in any of these locations may result in an
error code being returned, a (fatal) signal being raised, or other modes
of failure.
For these reasons, passing non-zero address tags to the kernel via
system calls is forbidden, and using a non-zero address tag for sp is
strongly discouraged.
Programs maintaining a frame pointer and frame records that use non-zero
address tags may suffer impaired or inaccurate debug and profiling
visibility.
Preserving tags
---------------
Non-zero tags are not preserved when delivering signals. This means that
signal handlers in applications making use of tags cannot rely on the
tag information for user virtual addresses being maintained for fields
inside siginfo_t. One exception to this rule is for signals raised in
response to watchpoint debug exceptions, where the tag information will
be preserved.
The architecture prevents the use of a tagged PC, so the upper byte will
be set to a sign-extension of bit 55 on exception return.
Other considerations
--------------------
Special care should be taken when using tagged pointers, since it is
likely that C compilers will not hazard two virtual addresses differing
only in the upper byte.
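
For illustration only (not taken from this patch), a minimal userspace sketch of clearing the tag before an address is passed to the kernel; the helper name is hypothetical:

#include <stdint.h>

/* Return the address with bits 63:56 cleared, i.e. an address tag of 0x00,
 * which is what the documentation above requires for syscall arguments. */
static inline void *untag_pointer(void *tagged)
{
	return (void *)((uintptr_t)tagged & ~(0xffULL << 56));
}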


@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 68
SUBLEVEL = 70
EXTRAVERSION =
NAME = Blurry Fish Butt


@ -1188,8 +1188,10 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
return -EFAULT;
err = 0;
err |= put_user(status, ustatus);
err = put_user(status, ustatus);
if (ret < 0)
return err ? err : ret;
err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);


@ -162,9 +162,10 @@
};
adc0: adc@f8018000 {
atmel,adc-vref = <3300>;
atmel,adc-channels-used = <0xfe>;
pinctrl-0 = <
&pinctrl_adc0_adtrg
&pinctrl_adc0_ad0
&pinctrl_adc0_ad1
&pinctrl_adc0_ad2
&pinctrl_adc0_ad3
@ -172,8 +173,6 @@
&pinctrl_adc0_ad5
&pinctrl_adc0_ad6
&pinctrl_adc0_ad7
&pinctrl_adc0_ad8
&pinctrl_adc0_ad9
>;
status = "okay";
};


@ -565,6 +565,7 @@
regulator-name = "+3VS,vdd_pnl";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
gpio = <&gpio TEGRA_GPIO(A, 4) GPIO_ACTIVE_HIGH>;
enable-active-high;
};


@ -208,9 +208,10 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
int ret = 1;
struct kvm *kvm = vcpu->kvm;
unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
int ret = 1;
switch (psci_fn) {
case PSCI_0_2_FN_PSCI_VERSION:
@ -230,7 +231,9 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
break;
case PSCI_0_2_FN_CPU_ON:
case PSCI_0_2_FN64_CPU_ON:
mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
mutex_unlock(&kvm->lock);
break;
case PSCI_0_2_FN_AFFINITY_INFO:
case PSCI_0_2_FN64_AFFINITY_INFO:
@ -279,6 +282,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
@ -288,7 +292,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
val = PSCI_RET_SUCCESS;
break;
case KVM_PSCI_FN_CPU_ON:
mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
mutex_unlock(&kvm->lock);
break;
default:
val = PSCI_RET_NOT_SUPPORTED;


@ -48,7 +48,7 @@ static inline unsigned long __xchg_case_##name(unsigned long x, \
" swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
" nop\n" \
" " #nop_lse) \
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) \
: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
: "r" (x) \
: cl); \
\


@ -108,11 +108,12 @@ static inline void set_fs(mm_segment_t fs)
*/
#define __range_ok(addr, size) \
({ \
unsigned long __addr = (unsigned long __force)(addr); \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
: "=&r" (flag), "=&r" (roksum) \
: "1" (addr), "Ir" (size), \
: "1" (__addr), "Ir" (size), \
"r" (current_thread_info()->addr_limit) \
: "cc"); \
flag; \


@ -1055,8 +1055,8 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
int Rt = (hsr >> 5) & 0xf;
int Rt2 = (hsr >> 10) & 0xf;
int Rt = (hsr >> 5) & 0x1f;
int Rt2 = (hsr >> 10) & 0x1f;
params.is_aarch32 = true;
params.is_32bit = false;
@ -1107,7 +1107,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
int Rt = (hsr >> 5) & 0xf;
int Rt = (hsr >> 5) & 0x1f;
params.is_aarch32 = true;
params.is_32bit = true;


@ -28,24 +28,32 @@
#define segment_eq(a, b) ((a).seg == (b).seg)
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
/*
* Explicitly allow NULL pointers here. Parts of the kernel such
* as readv/writev use access_ok to validate pointers, but want
* to allow NULL pointers for various reasons. NULL pointers are
* safe to allow through because the first page is not mappable on
* Meta.
*
* We also wish to avoid letting user code access the system area
* and the kernel half of the address space.
*/
#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
((addr) > PAGE_OFFSET && \
(addr) < LINCORE_BASE))
static inline int __access_ok(unsigned long addr, unsigned long size)
{
return __kernel_ok || !__user_bad(addr, size);
/*
* Allow access to the user mapped memory area, but not the system area
* before it. The check extends to the top of the address space when
* kernel access is allowed (there's no real reason to user copy to the
* system area in any case).
*/
if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
size <= get_fs().seg - addr))
return true;
/*
* Explicitly allow NULL pointers here. Parts of the kernel such
* as readv/writev use access_ok to validate pointers, but want
* to allow NULL pointers for various reasons. NULL pointers are
* safe to allow through because the first page is not mappable on
* Meta.
*/
if (!addr)
return true;
/* Allow access to core code memory area... */
if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
size <= LINCORE_CODE_LIMIT + 1 - addr)
return true;
/* ... but no other areas. */
return false;
}
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \
@ -186,8 +194,13 @@ do { \
extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
long count);
#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count)
static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
if (!access_ok(VERIFY_READ, src, 1))
return -EFAULT;
return __strncpy_from_user(dst, src, count);
}
/*
* Return the size of a string (including the ending 0)
*


@ -735,8 +735,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
andis. r15,r14,(DBSR_IC|DBSR_BT)@h
beq+ 1f
#ifdef CONFIG_RELOCATABLE
ld r15,PACATOC(r13)
ld r14,interrupt_base_book3e@got(r15)
ld r15,__end_interrupts@got(r15)
#else
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
LOAD_REG_IMMEDIATE(r15,__end_interrupts)
#endif
cmpld cr0,r10,r14
cmpld cr1,r10,r15
blt+ cr0,1f
@ -799,8 +805,14 @@ kernel_dbg_exc:
andis. r15,r14,(DBSR_IC|DBSR_BT)@h
beq+ 1f
#ifdef CONFIG_RELOCATABLE
ld r15,PACATOC(r13)
ld r14,interrupt_base_book3e@got(r15)
ld r15,__end_interrupts@got(r15)
#else
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
LOAD_REG_IMMEDIATE(r15,__end_interrupts)
#endif
cmpld cr0,r10,r14
cmpld cr1,r10,r15
blt+ cr0,1f


@ -204,6 +204,8 @@ static void machine_check_process_queued_event(struct irq_work *work)
{
int index;
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
/*
* For now just print it to console.
* TODO: log this error event to FSP or nvram.


@ -297,8 +297,6 @@ long machine_check_early(struct pt_regs *regs)
__this_cpu_inc(irq_stat.mce_exceptions);
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
handled = cur_cpu_spec->machine_check_early(regs);
return handled;
@ -704,6 +702,8 @@ void machine_check_exception(struct pt_regs *regs)
__this_cpu_inc(irq_stat.mce_exceptions);
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
/* See if any machine dependent calls. In theory, we would want
* to call the CPU first, and call the ppc_md. one if the CPU
* one returns a positive number. However there is existing code


@ -280,7 +280,6 @@ int dlpar_detach_node(struct device_node *dn)
if (rc)
return rc;
of_node_put(dn); /* Must decrement the refcount */
return 0;
}


@ -463,6 +463,20 @@ static void *nt_vmcoreinfo(void *ptr)
return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
}
/*
* Initialize final note (needed for /proc/vmcore code)
*/
static void *nt_final(void *ptr)
{
Elf64_Nhdr *note;
note = (Elf64_Nhdr *) ptr;
note->n_namesz = 0;
note->n_descsz = 0;
note->n_type = 0;
return PTR_ADD(ptr, sizeof(Elf64_Nhdr));
}
/*
* Initialize ELF header (new kernel)
*/
@ -553,6 +567,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs);
}
ptr = nt_vmcoreinfo(ptr);
ptr = nt_final(ptr);
memset(phdr, 0, sizeof(*phdr));
phdr->p_type = PT_NOTE;
phdr->p_offset = notes_offset;


@ -308,6 +308,7 @@ ENTRY(system_call)
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lsysc_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
@ -593,6 +594,7 @@ ENTRY(io_int_handler)
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lio_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
@ -1118,15 +1120,23 @@ cleanup_critical:
br %r14
.Lcleanup_sysc_restore:
# check if stpt has been executed
clg %r9,BASED(.Lcleanup_sysc_restore_insn)
jh 0f
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8)
je 1f
lg %r9,24(%r11) # get saved pointer to pt_regs
mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
0: lmg %r8,%r9,__LC_RETURN_PSW
1: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_sysc_restore_insn:
.quad .Lsysc_exit_timer
.quad .Lsysc_done - 4
.Lcleanup_io_tif:
@ -1134,15 +1144,20 @@ cleanup_critical:
br %r14
.Lcleanup_io_restore:
# check if stpt has been executed
clg %r9,BASED(.Lcleanup_io_restore_insn)
je 0f
jh 0f
mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0: clg %r9,BASED(.Lcleanup_io_restore_insn+8)
je 1f
lg %r9,24(%r11) # get saved r11 pointer to pt_regs
mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
0: lmg %r8,%r9,__LC_RETURN_PSW
1: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_io_restore_insn:
.quad .Lio_exit_timer
.quad .Lio_done - 4
.Lcleanup_idle:


@ -16,7 +16,7 @@
#ifndef BOOT_BOOT_H
#define BOOT_BOOT_H
#define STACK_SIZE 512 /* Minimum number of bytes for stack */
#define STACK_SIZE 1024 /* Minimum number of bytes for stack */
#ifndef __ASSEMBLY__


@ -122,7 +122,7 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
if (bytes < 8) {
if (!IS_ALIGNED(dest, 4) || (bytes != 4))
__arch_wb_cache_pmem(addr, 1);
__arch_wb_cache_pmem(addr, bytes);
} else {
if (!IS_ALIGNED(dest, 8)) {
dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);


@ -96,6 +96,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
* Boot time FPU feature detection code:
*/
unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
static void __init fpu__init_system_mxcsr(void)
{


@ -2960,6 +2960,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
| KVM_VCPUEVENT_VALID_SMM))
return -EINVAL;
/* INITs are latched while in SMM */
if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
(events->smi.smm || events->smi.pending) &&
vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
return -EINVAL;
process_nmi(vcpu);
vcpu->arch.exception.pending = events->exception.injected;
vcpu->arch.exception.nr = events->exception.nr;
@ -3134,11 +3140,14 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
}
}
#define XSAVE_MXCSR_OFFSET 24
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
u64 xstate_bv =
*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
if (cpu_has_xsave) {
/*
@ -3146,11 +3155,13 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
* CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
* with old userspace.
*/
if (xstate_bv & ~kvm_supported_xcr0())
if (xstate_bv & ~kvm_supported_xcr0() ||
mxcsr & ~mxcsr_feature_mask)
return -EINVAL;
load_xsave(vcpu, (u8 *)guest_xsave->region);
} else {
if (xstate_bv & ~XFEATURE_MASK_FPSSE)
if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
mxcsr & ~mxcsr_feature_mask)
return -EINVAL;
memcpy(&vcpu->arch.guest_fpu.state.fxsave,
guest_xsave->region, sizeof(struct fxregs_state));
@ -4597,16 +4608,20 @@ emul_write:
static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
/* TODO: String I/O for in kernel device */
int r;
int r = 0, i;
if (vcpu->arch.pio.in)
r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu->arch.pio.size, pd);
else
r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
vcpu->arch.pio.port, vcpu->arch.pio.size,
pd);
for (i = 0; i < vcpu->arch.pio.count; i++) {
if (vcpu->arch.pio.in)
r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu->arch.pio.size, pd);
else
r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
vcpu->arch.pio.port, vcpu->arch.pio.size,
pd);
if (r)
break;
pd += vcpu->arch.pio.size;
}
return r;
}
@ -4644,6 +4659,8 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
if (vcpu->arch.pio.count)
goto data_avail;
memset(vcpu->arch.pio_data, 0, size * count);
ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
if (ret) {
data_avail:
@ -6993,6 +7010,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
return -EINVAL;
/* INITs are latched while in SMM */
if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
(mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
return -EINVAL;
if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);


@ -121,7 +121,7 @@ int poke_user(struct task_struct *child, long addr, long data)
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
(addr <= offsetof(struct user, u_debugreg[7]))) {
addr -= offsetof(struct user, u_debugreg[0]);
addr = addr >> 2;
addr = addr >> 3;
if ((addr == 4) || (addr == 5))
return -EIO;
child->thread.arch.debugregs[addr] = data;


@ -2038,7 +2038,8 @@ static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
/*
* Translate a virtual address to a physical one without relying on mapped
* page tables.
* page tables. Don't rely on big pages being aligned in (guest) physical
* space!
*/
static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
{
@ -2059,7 +2060,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
sizeof(pud)));
if (!pud_present(pud))
return 0;
pa = pud_pfn(pud) << PAGE_SHIFT;
pa = pud_val(pud) & PTE_PFN_MASK;
if (pud_large(pud))
return pa + (vaddr & ~PUD_MASK);
@ -2067,7 +2068,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
sizeof(pmd)));
if (!pmd_present(pmd))
return 0;
pa = pmd_pfn(pmd) << PAGE_SHIFT;
pa = pmd_val(pmd) & PTE_PFN_MASK;
if (pmd_large(pmd))
return pa + (vaddr & ~PMD_MASK);


@ -412,7 +412,8 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
template->flags;
bi->interval_exp = ilog2(queue_logical_block_size(disk->queue));
bi->interval_exp = template->interval_exp ? :
ilog2(queue_logical_block_size(disk->queue));
bi->profile = template->profile ? template->profile : &nop_profile;
bi->tuple_size = template->tuple_size;
bi->tag_size = template->tag_size;


@ -29,6 +29,11 @@ struct aead_sg_list {
struct scatterlist sg[ALG_MAX_PAGES];
};
struct aead_tfm {
struct crypto_aead *aead;
bool has_key;
};
struct aead_ctx {
struct aead_sg_list tsgl;
/*
@ -513,24 +518,146 @@ static struct proto_ops algif_aead_ops = {
.poll = aead_poll,
};
static int aead_check_key(struct socket *sock)
{
int err = 0;
struct sock *psk;
struct alg_sock *pask;
struct aead_tfm *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
lock_sock(sk);
if (ask->refcnt)
goto unlock_child;
psk = ask->parent;
pask = alg_sk(ask->parent);
tfm = pask->private;
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
if (!tfm->has_key)
goto unlock;
if (!pask->refcnt++)
sock_hold(psk);
ask->refcnt = 1;
sock_put(psk);
err = 0;
unlock:
release_sock(psk);
unlock_child:
release_sock(sk);
return err;
}
static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t size)
{
int err;
err = aead_check_key(sock);
if (err)
return err;
return aead_sendmsg(sock, msg, size);
}
static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
int offset, size_t size, int flags)
{
int err;
err = aead_check_key(sock);
if (err)
return err;
return aead_sendpage(sock, page, offset, size, flags);
}
static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
int err;
err = aead_check_key(sock);
if (err)
return err;
return aead_recvmsg(sock, msg, ignored, flags);
}
static struct proto_ops algif_aead_ops_nokey = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.getsockopt = sock_no_getsockopt,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
.setsockopt = sock_no_setsockopt,
.release = af_alg_release,
.sendmsg = aead_sendmsg_nokey,
.sendpage = aead_sendpage_nokey,
.recvmsg = aead_recvmsg_nokey,
.poll = aead_poll,
};
static void *aead_bind(const char *name, u32 type, u32 mask)
{
return crypto_alloc_aead(name, type, mask);
struct aead_tfm *tfm;
struct crypto_aead *aead;
tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
if (!tfm)
return ERR_PTR(-ENOMEM);
aead = crypto_alloc_aead(name, type, mask);
if (IS_ERR(aead)) {
kfree(tfm);
return ERR_CAST(aead);
}
tfm->aead = aead;
return tfm;
}
static void aead_release(void *private)
{
crypto_free_aead(private);
struct aead_tfm *tfm = private;
crypto_free_aead(tfm->aead);
kfree(tfm);
}
static int aead_setauthsize(void *private, unsigned int authsize)
{
return crypto_aead_setauthsize(private, authsize);
struct aead_tfm *tfm = private;
return crypto_aead_setauthsize(tfm->aead, authsize);
}
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
return crypto_aead_setkey(private, key, keylen);
struct aead_tfm *tfm = private;
int err;
err = crypto_aead_setkey(tfm->aead, key, keylen);
tfm->has_key = !err;
return err;
}
static void aead_sock_destruct(struct sock *sk)
@ -546,12 +673,14 @@ static void aead_sock_destruct(struct sock *sk)
af_alg_release_parent(sk);
}
static int aead_accept_parent(void *private, struct sock *sk)
static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
struct aead_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
unsigned int ivlen = crypto_aead_ivsize(private);
struct aead_tfm *tfm = private;
struct crypto_aead *aead = tfm->aead;
unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
unsigned int ivlen = crypto_aead_ivsize(aead);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@ -577,7 +706,7 @@ static int aead_accept_parent(void *private, struct sock *sk)
ask->private = ctx;
aead_request_set_tfm(&ctx->aead_req, private);
aead_request_set_tfm(&ctx->aead_req, aead);
aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
@ -586,13 +715,25 @@ static int aead_accept_parent(void *private, struct sock *sk)
return 0;
}
static int aead_accept_parent(void *private, struct sock *sk)
{
struct aead_tfm *tfm = private;
if (!tfm->has_key)
return -ENOKEY;
return aead_accept_parent_nokey(private, sk);
}
static const struct af_alg_type algif_type_aead = {
.bind = aead_bind,
.release = aead_release,
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.accept = aead_accept_parent,
.accept_nokey = aead_accept_parent_nokey,
.ops = &algif_aead_ops,
.ops_nokey = &algif_aead_ops_nokey,
.name = "aead",
.owner = THIS_MODULE
};


@ -99,6 +99,7 @@ obj-$(CONFIG_USB_PHY) += usb/
obj-$(CONFIG_USB) += usb/
obj-$(CONFIG_PCI) += usb/
obj-$(CONFIG_USB_GADGET) += usb/
obj-$(CONFIG_OF) += usb/
obj-$(CONFIG_SERIO) += input/serio/
obj-$(CONFIG_GAMEPORT) += input/gameport/
obj-$(CONFIG_INPUT) += input/


@ -287,6 +287,9 @@ static int bcm_open(struct hci_uart *hu)
hu->priv = bcm;
if (!hu->tty->dev)
goto out;
mutex_lock(&bcm_device_lock);
list_for_each(p, &bcm_device_list) {
struct bcm_device *dev = list_entry(p, struct bcm_device, list);
@ -307,7 +310,7 @@ static int bcm_open(struct hci_uart *hu)
}
mutex_unlock(&bcm_device_lock);
out:
return 0;
}


@ -307,6 +307,9 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
struct list_head *p;
int err = -ENODEV;
if (!hu->tty->dev)
return err;
mutex_lock(&intel_device_list_lock);
list_for_each(p, &intel_device_list) {
@ -379,6 +382,9 @@ static void intel_busy_work(struct work_struct *work)
struct intel_data *intel = container_of(work, struct intel_data,
busy_work);
if (!intel->hu->tty->dev)
return;
/* Link is busy, delay the suspend */
mutex_lock(&intel_device_list_lock);
list_for_each(p, &intel_device_list) {
@ -913,6 +919,8 @@ done:
list_for_each(p, &intel_device_list) {
struct intel_device *dev = list_entry(p, struct intel_device,
list);
if (!hu->tty->dev)
break;
if (hu->tty->dev->parent == dev->pdev->dev.parent) {
if (device_may_wakeup(&dev->pdev->dev))
idev = dev;
@ -1094,6 +1102,9 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
BT_DBG("hu %p skb %p", hu, skb);
if (!hu->tty->dev)
goto out_enqueue;
/* Be sure our controller is resumed and potential LPM transaction
* completed before enqueuing any packet.
*/
@ -1110,7 +1121,7 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
}
}
mutex_unlock(&intel_device_list_lock);
out_enqueue:
skb_queue_tail(&intel->txq, skb);
return 0;


@ -888,6 +888,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
* for details on the intricacies of this.
*/
int left;
unsigned char *data_to_send;
ssif_inc_stat(ssif_info, sent_messages_parts);
@ -896,6 +897,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
left = 32;
/* Length byte. */
ssif_info->multi_data[ssif_info->multi_pos] = left;
data_to_send = ssif_info->multi_data + ssif_info->multi_pos;
ssif_info->multi_pos += left;
if (left < 32)
/*
@ -909,7 +911,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
rv = ssif_i2c_send(ssif_info, msg_written_handler,
I2C_SMBUS_WRITE,
SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
ssif_info->multi_data + ssif_info->multi_pos,
data_to_send,
I2C_SMBUS_BLOCK_DATA);
if (rv < 0) {
/* request failed, just return the error. */


@ -859,7 +859,11 @@ static int __init lp_setup (char *str)
} else if (!strcmp(str, "auto")) {
parport_nr[0] = LP_PARPORT_AUTO;
} else if (!strcmp(str, "none")) {
parport_nr[parport_ptr++] = LP_PARPORT_NONE;
if (parport_ptr < LP_NO)
parport_nr[parport_ptr++] = LP_PARPORT_NONE;
else
printk(KERN_INFO "lp: too many ports, %s ignored.\n",
str);
} else if (!strcmp(str, "reset")) {
reset = 1;
}


@ -343,6 +343,11 @@ static const struct vm_operations_struct mmap_mem_ops = {
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
size_t size = vma->vm_end - vma->vm_start;
phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
/* It's illegal to wrap around the end of the physical address space. */
if (offset + (phys_addr_t)size < offset)
return -EINVAL;
if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
return -EINVAL;


@ -118,8 +118,7 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
memcpy_fromio(buf, priv->rsp, 6);
expected = be32_to_cpup((__be32 *) &buf[2]);
if (expected > count)
if (expected > count || expected < 6)
return -EIO;
memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);


@ -1126,23 +1126,10 @@ static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
tmp = min(dfixed_trunc(a), tmp);
b.full = dfixed_const(mc_latency + 512);
c.full = dfixed_const(wm->disp_clk);
b.full = dfixed_div(b, c);
c.full = dfixed_const(dmif_size);
b.full = dfixed_div(c, b);
tmp = min(dfixed_trunc(a), dfixed_trunc(b));
b.full = dfixed_const(1000);
c.full = dfixed_const(wm->disp_clk);
b.full = dfixed_div(c, b);
c.full = dfixed_const(wm->bytes_per_pixel);
b.full = dfixed_mul(b, c);
lb_fill_bw = min(tmp, dfixed_trunc(b));
lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@ -1250,14 +1237,14 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
u32 pixel_period;
u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@ -1272,7 +1259,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
wm_high.active_time = mode->crtc_hdisplay * pixel_period;
wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@ -1311,7 +1298,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
wm_low.active_time = mode->crtc_hdisplay * pixel_period;
wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)


@ -1114,23 +1114,10 @@ static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
tmp = min(dfixed_trunc(a), tmp);
b.full = dfixed_const(mc_latency + 512);
c.full = dfixed_const(wm->disp_clk);
b.full = dfixed_div(b, c);
c.full = dfixed_const(dmif_size);
b.full = dfixed_div(c, b);
tmp = min(dfixed_trunc(a), dfixed_trunc(b));
b.full = dfixed_const(1000);
c.full = dfixed_const(wm->disp_clk);
b.full = dfixed_div(c, b);
c.full = dfixed_const(wm->bytes_per_pixel);
b.full = dfixed_mul(b, c);
lb_fill_bw = min(tmp, dfixed_trunc(b));
lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@ -1238,14 +1225,14 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
u32 pixel_period;
u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@ -1260,7 +1247,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
wm_high.active_time = mode->crtc_hdisplay * pixel_period;
wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@ -1299,7 +1286,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
wm_low.active_time = mode->crtc_hdisplay * pixel_period;
wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)


@ -1096,23 +1096,10 @@ static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
tmp = min(dfixed_trunc(a), tmp);
b.full = dfixed_const(mc_latency + 512);
c.full = dfixed_const(wm->disp_clk);
b.full = dfixed_div(b, c);
c.full = dfixed_const(dmif_size);
b.full = dfixed_div(c, b);
tmp = min(dfixed_trunc(a), dfixed_trunc(b));
b.full = dfixed_const(1000);
c.full = dfixed_const(wm->disp_clk);
b.full = dfixed_div(c, b);
c.full = dfixed_const(wm->bytes_per_pixel);
b.full = dfixed_mul(b, c);
lb_fill_bw = min(tmp, dfixed_trunc(b));
lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@ -1220,14 +1207,14 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce8_wm_params wm_low, wm_high;
u32 pixel_period;
u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@ -1242,7 +1229,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
wm_high.active_time = mode->crtc_hdisplay * pixel_period;
wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@ -1281,7 +1268,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
wm_low.active_time = mode->crtc_hdisplay * pixel_period;
wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)


@ -75,6 +75,8 @@
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
/* Force 6bpc */
#define EDID_QUIRK_FORCE_6BPC (1 << 10)
/* Force 10bpc */
#define EDID_QUIRK_FORCE_10BPC (1 << 11)
struct detailed_mode_closure {
struct drm_connector *connector;
@ -125,6 +127,9 @@ static struct edid_quirk {
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
EDID_QUIRK_DETAILED_IN_CM },
/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
{ "LGD", 764, EDID_QUIRK_FORCE_10BPC },
/* LG Philips LCD LP154W01-A5 */
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
@ -4478,6 +4483,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
if (quirks & EDID_QUIRK_FORCE_10BPC)
connector->display_info.bpc = 10;
if (quirks & EDID_QUIRK_FORCE_12BPC)
connector->display_info.bpc = 12;


@ -130,7 +130,7 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
poll = false;
}
if (list_empty(&therm->alarm.head) && poll)
if (poll)
nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
spin_unlock_irqrestore(&therm->lock, flags);


@ -83,7 +83,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
spin_unlock_irqrestore(&fan->lock, flags);
/* schedule next fan update, if not at target speed already */
if (list_empty(&fan->alarm.head) && target != duty) {
if (target != duty) {
u16 bump_period = fan->bios.bump_period;
u16 slow_down_period = fan->bios.slow_down_period;
u64 delay;


@ -53,7 +53,7 @@ nvkm_fantog_update(struct nvkm_fantog *fan, int percent)
duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
if (percent != (duty * 100)) {
u64 next_change = (percent * fan->period_us) / 100;
if (!duty)
next_change = fan->period_us - next_change;


@ -185,7 +185,7 @@ alarm_timer_callback(struct nvkm_alarm *alarm)
spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
/* schedule the next poll in one second */
if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
if (therm->func->temp_get(therm) >= 0)
nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
}


@ -36,23 +36,29 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
unsigned long flags;
LIST_HEAD(exec);
/* move any due alarms off the pending list */
/* Process pending alarms. */
spin_lock_irqsave(&tmr->lock, flags);
list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
if (alarm->timestamp <= nvkm_timer_read(tmr))
list_move_tail(&alarm->head, &exec);
/* Have we hit the earliest alarm that hasn't gone off? */
if (alarm->timestamp > nvkm_timer_read(tmr)) {
/* Schedule it. If we didn't race, we're done. */
tmr->func->alarm_init(tmr, alarm->timestamp);
if (alarm->timestamp > nvkm_timer_read(tmr))
break;
}
/* Move to completed list. We'll drop the lock before
* executing the callback so it can reschedule itself.
*/
list_move_tail(&alarm->head, &exec);
}
/* reschedule interrupt for next alarm time */
if (!list_empty(&tmr->alarms)) {
alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
tmr->func->alarm_init(tmr, alarm->timestamp);
} else {
/* Shut down interrupt if no more pending alarms. */
if (list_empty(&tmr->alarms))
tmr->func->alarm_fini(tmr);
}
spin_unlock_irqrestore(&tmr->lock, flags);
/* execute any pending alarm handlers */
/* Execute completed callbacks. */
list_for_each_entry_safe(alarm, atemp, &exec, head) {
list_del_init(&alarm->head);
alarm->func(alarm);
@ -65,24 +71,37 @@ nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
struct nvkm_alarm *list;
unsigned long flags;
alarm->timestamp = nvkm_timer_read(tmr) + nsec;
/* append new alarm to list, in soonest-alarm-first order */
/* Remove alarm from pending list.
*
* This both protects against the corruption of the list,
* and implements alarm rescheduling/cancellation.
*/
spin_lock_irqsave(&tmr->lock, flags);
if (!nsec) {
if (!list_empty(&alarm->head))
list_del(&alarm->head);
} else {
list_del_init(&alarm->head);
if (nsec) {
/* Insert into pending list, ordered earliest to latest. */
alarm->timestamp = nvkm_timer_read(tmr) + nsec;
list_for_each_entry(list, &tmr->alarms, head) {
if (list->timestamp > alarm->timestamp)
break;
}
list_add_tail(&alarm->head, &list->head);
/* Update HW if this is now the earliest alarm. */
list = list_first_entry(&tmr->alarms, typeof(*list), head);
if (list == alarm) {
tmr->func->alarm_init(tmr, alarm->timestamp);
/* This shouldn't happen if callers aren't stupid.
*
* Worst case scenario is that it'll take roughly
* 4 seconds for the next alarm to trigger.
*/
WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
}
}
spin_unlock_irqrestore(&tmr->lock, flags);
/* process pending alarms */
nvkm_timer_alarm_trigger(tmr);
}
void


@ -76,8 +76,8 @@ nv04_timer_intr(struct nvkm_timer *tmr)
u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
if (stat & 0x00000001) {
nvkm_timer_alarm_trigger(tmr);
nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
nvkm_timer_alarm_trigger(tmr);
stat &= ~0x00000001;
}


@ -184,9 +184,9 @@ static const struct iio_chan_spec_ext_info ad7303_ext_info[] = {
.address = (chan), \
.scan_type = { \
.sign = 'u', \
.realbits = '8', \
.storagebits = '8', \
.shift = '0', \
.realbits = 8, \
.storagebits = 8, \
.shift = 0, \
}, \
.ext_info = ad7303_ext_info, \
}


@ -50,7 +50,6 @@
#define AS3935_TUNE_CAP 0x08
#define AS3935_CALIBRATE 0x3D
#define AS3935_WRITE_DATA BIT(15)
#define AS3935_READ_DATA BIT(14)
#define AS3935_ADDRESS(x) ((x) << 8)
@ -105,7 +104,7 @@ static int as3935_write(struct as3935_state *st,
{
u8 *buf = st->buf;
buf[0] = (AS3935_WRITE_DATA | AS3935_ADDRESS(reg)) >> 8;
buf[0] = AS3935_ADDRESS(reg) >> 8;
buf[1] = val;
return spi_write(st->spi, buf, 2);


@ -277,8 +277,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
fl6.saddr = src_in->sin6_addr;
fl6.flowi6_oif = addr->bound_dev_if;
dst = ip6_route_output(addr->net, NULL, &fl6);
if ((ret = dst->error))
ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
if (ret < 0)
goto put;
if (ipv6_addr_any(&fl6.saddr)) {


@ -863,7 +863,7 @@ err_put:
free_port_list_attributes(device);
err_unregister:
device_unregister(class_dev);
device_del(class_dev);
err:
return ret;


@ -2491,6 +2491,7 @@ err_counter:
mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
err_map:
mlx4_ib_free_eqs(dev, ibdev);
iounmap(ibdev->uar_map);
err_uar:


@ -1105,7 +1105,8 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
while ((p = rb_first(&ctx->mcg_table)) != NULL) {
group = rb_entry(p, struct mcast_group, node);
if (atomic_read(&group->refcount))
mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);
mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n",
atomic_read(&group->refcount), group);
force_clean_group(group);
}


@ -281,8 +281,11 @@ void ipoib_delete_debug_files(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
WARN_ONCE(!priv->path_dentry, "null path debug file\n");
debugfs_remove(priv->mcg_dentry);
debugfs_remove(priv->path_dentry);
priv->mcg_dentry = priv->path_dentry = NULL;
}
int ipoib_register_debugfs(void)


@ -106,6 +106,33 @@ static struct ib_client ipoib_client = {
.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int ipoib_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netdev_notifier_info *ni = ptr;
struct net_device *dev = ni->dev;
if (dev->netdev_ops->ndo_open != ipoib_open)
return NOTIFY_DONE;
switch (event) {
case NETDEV_REGISTER:
ipoib_create_debug_files(dev);
break;
case NETDEV_CHANGENAME:
ipoib_delete_debug_files(dev);
ipoib_create_debug_files(dev);
break;
case NETDEV_UNREGISTER:
ipoib_delete_debug_files(dev);
break;
}
return NOTIFY_DONE;
}
#endif
int ipoib_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@ -1595,8 +1622,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
ASSERT_RTNL();
ipoib_delete_debug_files(dev);
/* Delete any child interfaces first */
list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
/* Stop GC on child */
@ -1908,8 +1933,6 @@ static struct net_device *ipoib_add_port(const char *format,
goto register_failed;
}
ipoib_create_debug_files(priv->dev);
if (ipoib_cm_add_mode_attr(priv->dev))
goto sysfs_failed;
if (ipoib_add_pkey_attr(priv->dev))
@ -1924,7 +1947,6 @@ static struct net_device *ipoib_add_port(const char *format,
return priv->dev;
sysfs_failed:
ipoib_delete_debug_files(priv->dev);
unregister_netdev(priv->dev);
register_failed:
@ -2006,6 +2028,12 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
kfree(dev_list);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static struct notifier_block ipoib_netdev_notifier = {
.notifier_call = ipoib_netdev_event,
};
#endif
static int __init ipoib_init_module(void)
{
int ret;
@ -2057,6 +2085,9 @@ static int __init ipoib_init_module(void)
if (ret)
goto err_client;
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
register_netdevice_notifier(&ipoib_netdev_notifier);
#endif
return 0;
err_client:
@ -2074,6 +2105,9 @@ err_fs:
static void __exit ipoib_cleanup_module(void)
{
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
unregister_netdevice_notifier(&ipoib_netdev_notifier);
#endif
ipoib_netlink_fini();
ib_unregister_client(&ipoib_client);
ib_sa_unregister_client(&ipoib_sa_client);


@ -85,8 +85,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
goto register_failed;
}
ipoib_create_debug_files(priv->dev);
/* RTNL childs don't need proprietary sysfs entries */
if (type == IPOIB_LEGACY_CHILD) {
if (ipoib_cm_add_mode_attr(priv->dev))
@ -107,7 +105,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
sysfs_failed:
result = -ENOMEM;
ipoib_delete_debug_files(priv->dev);
unregister_netdevice(priv->dev);
register_failed:


@ -2005,11 +2005,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
if (context_copied(context)) {
u16 did_old = context_domain_id(context);
if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
iommu->flush.flush_context(iommu, did_old,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
DMA_TLB_DSI_FLUSH);
}
}
pgd = domain->pgd;


@ -374,6 +374,7 @@ config DM_LOG_USERSPACE
config DM_RAID
tristate "RAID 1/4/5/6/10 target"
depends on BLK_DEV_DM
select MD_RAID0
select MD_RAID1
select MD_RAID10
select MD_RAID456


@ -222,7 +222,7 @@ static DEFINE_SPINLOCK(param_spinlock);
* Buffers are freed after this timeout
*/
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
@ -914,10 +914,11 @@ static void __get_memory_limit(struct dm_bufio_client *c,
{
unsigned long buffers;
if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
mutex_lock(&dm_bufio_clients_lock);
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
if (mutex_trylock(&dm_bufio_clients_lock)) {
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
}
}
buffers = dm_bufio_cache_size_per_client >>
@ -1513,10 +1514,10 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
return true;
}
static unsigned get_retain_buffers(struct dm_bufio_client *c)
static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
return retain_bytes / c->block_size;
unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}
static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
@ -1526,7 +1527,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
struct dm_buffer *b, *tmp;
unsigned long freed = 0;
unsigned long count = nr_to_scan;
unsigned retain_target = get_retain_buffers(c);
unsigned long retain_target = get_retain_buffers(c);
for (l = 0; l < LIST_SIZE; l++) {
list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
@ -1752,11 +1753,19 @@ static bool older_than(struct dm_buffer *b, unsigned long age_hz)
static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
struct dm_buffer *b, *tmp;
unsigned retain_target = get_retain_buffers(c);
unsigned count;
unsigned long retain_target = get_retain_buffers(c);
unsigned long count;
LIST_HEAD(write_list);
dm_bufio_lock(c);
__check_watermark(c, &write_list);
if (unlikely(!list_empty(&write_list))) {
dm_bufio_unlock(c);
__flush_write_list(&write_list);
dm_bufio_lock(c);
}
count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
if (count <= retain_target)
@@ -1781,6 +1790,8 @@ static void cleanup_old_buffers(void)
mutex_lock(&dm_bufio_clients_lock);
__cache_size_refresh();
list_for_each_entry(c, &dm_bufio_all_clients, client_list)
__evict_old_buffers(c, max_age_hz);
@@ -1904,7 +1915,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
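
The dm-bufio hunks above widen dm_bufio_retain_bytes from unsigned to unsigned long and derive the retained-buffer count with a shift instead of a division by block_size. A minimal user-space sketch of the arithmetic, not the driver code itself (the 4 KiB block size and variable names are illustrative assumptions):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9  /* 512-byte sectors, as in the kernel */

int main(void)
{
    uint64_t retain_bytes = 8ULL << 30;   /* 8 GiB requested */
    unsigned sectors_per_block_bits = 3;  /* 8 sectors per block = 4 KiB */

    /* A 32-bit counter silently truncates large byte counts. */
    unsigned truncated = (unsigned)retain_bytes;

    /* Buffer count via shift: bytes >> (log2(sectors) + log2(512)). */
    uint64_t retain_buffers =
        retain_bytes >> (sectors_per_block_bits + SECTOR_SHIFT);

    printf("32-bit view of 8 GiB: %u bytes\n", truncated);   /* prints 0 */
    printf("retained buffers at 4 KiB each: %llu\n",
           (unsigned long long)retain_buffers);              /* 2097152 */
    return 0;
}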

@@ -1326,17 +1326,19 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
{
int r;
int r = -EINVAL;
flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
clear_clean_shutdown);
WRITE_LOCK(cmd);
if (cmd->fail_io)
goto out;
r = __commit_transaction(cmd, mutator);
if (r)
goto out;
r = __begin_transaction(cmd);
out:
WRITE_UNLOCK(cmd);
return r;
@@ -1348,7 +1350,8 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
int r = -EINVAL;
READ_LOCK(cmd);
r = dm_sm_get_nr_free(cmd->metadata_sm, result);
if (!cmd->fail_io)
r = dm_sm_get_nr_free(cmd->metadata_sm, result);
READ_UNLOCK(cmd);
return r;
@@ -1360,7 +1363,8 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
int r = -EINVAL;
READ_LOCK(cmd);
r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
if (!cmd->fail_io)
r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
READ_UNLOCK(cmd);
return r;

@@ -961,18 +961,18 @@ static int metadata_commit(struct era_metadata *md)
}
}
r = save_sm_root(md);
if (r) {
DMERR("%s: save_sm_root failed", __func__);
return r;
}
r = dm_tm_pre_commit(md->tm);
if (r) {
DMERR("%s: pre commit failed", __func__);
return r;
}
r = save_sm_root(md);
if (r) {
DMERR("%s: save_sm_root failed", __func__);
return r;
}
r = superblock_lock(md, &sblock);
if (r) {
DMERR("%s: superblock lock failed", __func__);

@@ -485,11 +485,11 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
if (r < 0)
return r;
r = save_sm_roots(pmd);
r = dm_tm_pre_commit(pmd->tm);
if (r < 0)
return r;
r = dm_tm_pre_commit(pmd->tm);
r = save_sm_roots(pmd);
if (r < 0)
return r;

@@ -12,11 +12,14 @@
#define DM_MSG_PREFIX "verity-avb"
/* Set via module parameter. */
/* Set via module parameters. */
static char avb_vbmeta_device[64];
static char avb_invalidate_on_error[4];
static void invalidate_vbmeta_endio(struct bio *bio)
{
if (bio->bi_error)
DMERR("invalidate_vbmeta_endio: error %d", bio->bi_error);
complete(bio->bi_private);
}
@@ -30,20 +33,19 @@ static int invalidate_vbmeta_submit(struct bio *bio,
bio->bi_private = &wait;
bio->bi_end_io = invalidate_vbmeta_endio;
bio->bi_bdev = bdev;
bio->bi_rw = rw;
bio->bi_iter.bi_sector = 0;
if (access_last_sector) {
sector_t last_sector = (i_size_read(bdev->bd_inode)>>SECTOR_SHIFT) - 1;
sector_t last_sector;
last_sector = (i_size_read(bdev->bd_inode)>>SECTOR_SHIFT) - 1;
bio->bi_iter.bi_sector = last_sector;
}
bio->bi_vcnt = 1;
bio->bi_iter.bi_idx = 0;
bio->bi_iter.bi_size = 512;
bio->bi_iter.bi_bvec_done = 0;
bio->bi_rw = rw;
bio->bi_io_vec[0].bv_page = page;
bio->bi_io_vec[0].bv_len = 512;
bio->bi_io_vec[0].bv_offset = 0;
if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
DMERR("invalidate_vbmeta_submit: bio_add_page error");
return -EIO;
}
submit_bio(rw, bio);
/* Wait up to 2 seconds for completion or fail. */
@@ -65,6 +67,9 @@ static int invalidate_vbmeta(dev_t vbmeta_devt)
int rw = REQ_SYNC | REQ_SOFTBARRIER | REQ_NOIDLE;
int access_last_sector = 0;
DMINFO("invalidate_vbmeta: acting on device %d:%d",
MAJOR(vbmeta_devt), MINOR(vbmeta_devt));
/* First we open the device for reading. */
dev_mode = FMODE_READ | FMODE_EXCL;
bdev = blkdev_get_by_dev(vbmeta_devt, dev_mode,
@@ -115,7 +120,7 @@ static int invalidate_vbmeta(dev_t vbmeta_devt)
goto failed_to_submit_read;
}
if (memcmp("AVBf", page_address(page) + offset, 4) != 0) {
DMERR("invalidate_vbmeta called on non-vbmeta partition");
DMERR("invalidate_vbmeta on non-vbmeta partition");
ret = -EINVAL;
goto invalid_header;
}
@@ -175,6 +180,11 @@ void dm_verity_avb_error_handler(void)
DMINFO("AVB error handler called for %s", avb_vbmeta_device);
if (strcmp(avb_invalidate_on_error, "yes") != 0) {
DMINFO("Not configured to invalidate");
return;
}
if (avb_vbmeta_device[0] == '\0') {
DMERR("avb_vbmeta_device parameter not set");
goto fail_no_dev;
@@ -215,3 +225,5 @@ MODULE_LICENSE("GPL");
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "androidboot.vbmeta."
module_param_string(device, avb_vbmeta_device, sizeof(avb_vbmeta_device), 0);
module_param_string(invalidate_on_error, avb_invalidate_on_error,
sizeof(avb_invalidate_on_error), 0);

@@ -887,8 +887,12 @@ static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
else
*result_key = le64_to_cpu(ro_node(s)->keys[0]);
if (next_block || flags & INTERNAL_NODE)
block = value64(ro_node(s), i);
if (next_block || flags & INTERNAL_NODE) {
if (find_highest)
block = value64(ro_node(s), i);
else
block = value64(ro_node(s), 0);
}
} while (flags & INTERNAL_NODE);

@@ -142,10 +142,23 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
{
int r;
uint32_t old_count;
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
return sm_ll_dec(&smd->ll, b, &ev);
r = sm_ll_dec(&smd->ll, b, &ev);
if (!r && (ev == SM_FREE)) {
/*
* It's only free if it's also free in the last
* transaction.
*/
r = sm_ll_lookup(&smd->old_ll, b, &old_count);
if (!r && !old_count)
smd->nr_allocated_this_transaction--;
}
return r;
}
static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)

@@ -2232,6 +2232,10 @@ static int resize_stripes(struct r5conf *conf, int newsize)
err = -ENOMEM;
mutex_unlock(&conf->cache_size_mutex);
conf->slab_cache = sc;
conf->active_name = 1-conf->active_name;
/* Step 4, return new stripes to service */
while(!list_empty(&newstripes)) {
nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -2249,8 +2253,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
}
/* critical section pass, GFP_NOIO no longer needed */
conf->slab_cache = sc;
conf->active_name = 1-conf->active_name;
if (!err)
conf->pool_size = newsize;
return err;

@@ -2678,7 +2678,9 @@ static struct dvb_frontend_ops cxd2841er_dvbt_t2_ops = {
FE_CAN_MUTE_TS |
FE_CAN_2G_MODULATION,
.frequency_min = 42000000,
.frequency_max = 1002000000
.frequency_max = 1002000000,
.symbol_rate_min = 870000,
.symbol_rate_max = 11700000
},
.init = cxd2841er_init_tc,
.sleep = cxd2841er_sleep_tc,

@@ -173,6 +173,7 @@ static void s5p_mfc_watchdog_worker(struct work_struct *work)
}
s5p_mfc_clock_on();
ret = s5p_mfc_init_hw(dev);
s5p_mfc_clock_off();
if (ret)
mfc_err("Failed to reinit FW\n");
}

@@ -1321,8 +1321,8 @@ static int mceusb_dev_probe(struct usb_interface *intf,
}
}
}
if (ep_in == NULL) {
dev_dbg(&intf->dev, "inbound and/or endpoint not found");
if (!ep_in || !ep_out) {
dev_dbg(&intf->dev, "required endpoints not found\n");
return -ENODEV;
}

@@ -281,6 +281,14 @@ static void free_firmware(struct xc2028_data *priv)
int i;
tuner_dbg("%s called\n", __func__);
/* free allocated f/w string */
if (priv->fname != firmware_name)
kfree(priv->fname);
priv->fname = NULL;
priv->state = XC2028_NO_FIRMWARE;
memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
if (!priv->firm)
return;
@@ -291,9 +299,6 @@ static void free_firmware(struct xc2028_data *priv)
priv->firm = NULL;
priv->firm_size = 0;
priv->state = XC2028_NO_FIRMWARE;
memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
}
static int load_all_firmwares(struct dvb_frontend *fe,
@@ -884,9 +889,8 @@ read_not_reliable:
return 0;
fail:
priv->state = XC2028_NO_FIRMWARE;
free_firmware(priv);
memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
if (retry_count < 8) {
msleep(50);
retry_count++;
@@ -1332,11 +1336,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
mutex_lock(&xc2028_list_mutex);
/* only perform final cleanup if this is the last instance */
if (hybrid_tuner_report_instance_count(priv) == 1) {
if (hybrid_tuner_report_instance_count(priv) == 1)
free_firmware(priv);
kfree(priv->ctrl.fname);
priv->ctrl.fname = NULL;
}
if (priv)
hybrid_tuner_release_state(priv);
@@ -1399,19 +1400,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
/*
* Copy the config data.
* For the firmware name, keep a local copy of the string,
* in order to avoid troubles during device release.
*/
kfree(priv->ctrl.fname);
priv->ctrl.fname = NULL;
memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
if (p->fname) {
priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
if (priv->ctrl.fname == NULL) {
rc = -ENOMEM;
goto unlock;
}
}
/*
* If firmware name changed, frees firmware. As free_firmware will
@@ -1426,10 +1416,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
if (priv->state == XC2028_NO_FIRMWARE) {
if (!firmware_name[0])
priv->fname = priv->ctrl.fname;
priv->fname = kstrdup(p->fname, GFP_KERNEL);
else
priv->fname = firmware_name;
if (!priv->fname) {
rc = -ENOMEM;
goto unlock;
}
rc = request_firmware_nowait(THIS_MODULE, 1,
priv->fname,
priv->i2c_props.adap->dev.parent,

@@ -671,10 +671,8 @@ static int cx231xx_audio_init(struct cx231xx *dev)
spin_lock_init(&adev->slock);
err = snd_pcm_new(card, "Cx231xx Audio", 0, 0, 1, &pcm);
if (err < 0) {
snd_card_free(card);
return err;
}
if (err < 0)
goto err_free_card;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_cx231xx_pcm_capture);
@@ -688,10 +686,9 @@ static int cx231xx_audio_init(struct cx231xx *dev)
INIT_WORK(&dev->wq_trigger, audio_trigger);
err = snd_card_register(card);
if (err < 0) {
snd_card_free(card);
return err;
}
if (err < 0)
goto err_free_card;
adev->sndcard = card;
adev->udev = dev->udev;
@@ -701,6 +698,11 @@ static int cx231xx_audio_init(struct cx231xx *dev)
hs_config_info[0].interface_info.
audio_index + 1];
if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
err = -ENODEV;
goto err_free_card;
}
adev->end_point_addr =
uif->altsetting[0].endpoint[isoc_pipe].desc.
bEndpointAddress;
@@ -710,13 +712,20 @@ static int cx231xx_audio_init(struct cx231xx *dev)
"audio EndPoint Addr 0x%x, Alternate settings: %i\n",
adev->end_point_addr, adev->num_alt);
adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL);
if (adev->alt_max_pkt_size == NULL)
return -ENOMEM;
if (!adev->alt_max_pkt_size) {
err = -ENOMEM;
goto err_free_card;
}
for (i = 0; i < adev->num_alt; i++) {
u16 tmp =
le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
u16 tmp;
if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
err = -ENODEV;
goto err_free_pkt_size;
}
tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
wMaxPacketSize);
adev->alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -726,6 +735,13 @@ static int cx231xx_audio_init(struct cx231xx *dev)
}
return 0;
err_free_pkt_size:
kfree(adev->alt_max_pkt_size);
err_free_card:
snd_card_free(card);
return err;
}
static int cx231xx_audio_fini(struct cx231xx *dev)
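
Several of the cx231xx hunks above repeat the same wMaxPacketSize decode after the new endpoint sanity checks. A small stand-alone sketch of that USB high-bandwidth calculation; the sample descriptor value is made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Bits 10:0 of wMaxPacketSize carry the packet size, bits 12:11 the
 * number of additional transactions per microframe. */
static unsigned max_payload(uint16_t wMaxPacketSize)
{
    return (wMaxPacketSize & 0x07ff) *
           (((wMaxPacketSize & 0x1800) >> 11) + 1);
}

int main(void)
{
    uint16_t sample = 0x1400;  /* hypothetical: 1024 bytes, 3 transactions */
    printf("payload per microframe: %u bytes\n", max_payload(sample)); /* 3072 */
    return 0;
}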

@@ -1447,6 +1447,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
uif = udev->actconfig->interface[idx];
if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
return -ENODEV;
dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress;
dev->video_mode.num_alt = uif->num_altsetting;
@@ -1460,7 +1463,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
return -ENOMEM;
for (i = 0; i < dev->video_mode.num_alt; i++) {
u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
u16 tmp;
if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
return -ENODEV;
tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
dev_dbg(dev->dev,
"Alternate setting %i, max size= %i\n", i,
@@ -1477,6 +1485,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
}
uif = udev->actconfig->interface[idx];
if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
return -ENODEV;
dev->vbi_mode.end_point_addr =
uif->altsetting[0].endpoint[isoc_pipe].desc.
bEndpointAddress;
@@ -1493,8 +1504,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
return -ENOMEM;
for (i = 0; i < dev->vbi_mode.num_alt; i++) {
u16 tmp =
le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
u16 tmp;
if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
return -ENODEV;
tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
desc.wMaxPacketSize);
dev->vbi_mode.alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -1514,6 +1529,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
}
uif = udev->actconfig->interface[idx];
if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
return -ENODEV;
dev->sliced_cc_mode.end_point_addr =
uif->altsetting[0].endpoint[isoc_pipe].desc.
bEndpointAddress;
@@ -1528,7 +1546,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
return -ENOMEM;
for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) {
u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
u16 tmp;
if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
return -ENODEV;
tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
desc.wMaxPacketSize);
dev->sliced_cc_mode.alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -1693,6 +1716,11 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
}
uif = udev->actconfig->interface[idx];
if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
retval = -ENODEV;
goto err_video_alt;
}
dev->ts1_mode.end_point_addr =
uif->altsetting[0].endpoint[isoc_pipe].
desc.bEndpointAddress;
@@ -1710,7 +1738,14 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
}
for (i = 0; i < dev->ts1_mode.num_alt; i++) {
u16 tmp = le16_to_cpu(uif->altsetting[i].
u16 tmp;
if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
retval = -ENODEV;
goto err_video_alt;
}
tmp = le16_to_cpu(uif->altsetting[i].
endpoint[isoc_pipe].desc.
wMaxPacketSize);
dev->ts1_mode.alt_max_pkt_size[i] =

@@ -783,6 +783,9 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
/* Starting in firmware 1.20, the RC info is provided on a bulk pipe */
if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1)
return -ENODEV;
purb = usb_alloc_urb(0, GFP_KERNEL);
if (purb == NULL) {
err("rc usb alloc urb failed");

@@ -78,6 +78,9 @@ static int ttusb2_msg(struct dvb_usb_device *d, u8 cmd,
u8 *s, *r = NULL;
int ret = 0;
if (4 + rlen > 64)
return -EIO;
s = kzalloc(wlen+4, GFP_KERNEL);
if (!s)
return -ENOMEM;
@@ -381,6 +384,22 @@ static int ttusb2_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
write_read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
read = msg[i].flags & I2C_M_RD;
if (3 + msg[i].len > sizeof(obuf)) {
err("i2c wr len=%d too high", msg[i].len);
break;
}
if (write_read) {
if (3 + msg[i+1].len > sizeof(ibuf)) {
err("i2c rd len=%d too high", msg[i+1].len);
break;
}
} else if (read) {
if (3 + msg[i].len > sizeof(ibuf)) {
err("i2c rd len=%d too high", msg[i].len);
break;
}
}
obuf[0] = (msg[i].addr << 1) | (write_read | read);
if (read)
obuf[1] = 0;

@@ -188,6 +188,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
return -EIO;
}
if (alt->desc.bNumEndpoints < 2)
return -ENODEV;
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;

@@ -1523,7 +1523,14 @@ static int usbvision_probe(struct usb_interface *intf,
}
for (i = 0; i < usbvision->num_alt; i++) {
u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
u16 tmp;
if (uif->altsetting[i].desc.bNumEndpoints < 2) {
ret = -ENODEV;
goto err_pkt;
}
tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
wMaxPacketSize);
usbvision->alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);

@@ -604,6 +604,14 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam,
ptr = pdest = frm->lpvbits;
if (frm->ulState == ZR364XX_READ_IDLE) {
if (purb->actual_length < 128) {
/* header incomplete */
dev_info(&cam->udev->dev,
"%s: buffer (%d bytes) too small to hold jpeg header. Discarding.\n",
__func__, purb->actual_length);
return -EINVAL;
}
frm->ulState = ZR364XX_READ_FRAME;
frm->cur_size = 0;

@@ -49,7 +49,8 @@ struct io_stats {
#define UID_STATE_TOTAL_CURR 2
#define UID_STATE_TOTAL_LAST 3
#define UID_STATE_SIZE 4
#define UID_STATE_DEAD_TASKS 4
#define UID_STATE_SIZE 5
struct uid_entry {
uid_t uid;
@@ -214,35 +215,44 @@ static u64 compute_write_bytes(struct task_struct *task)
return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
}
static void add_uid_io_curr_stats(struct uid_entry *uid_entry,
struct task_struct *task)
static void add_uid_io_stats(struct uid_entry *uid_entry,
struct task_struct *task, int slot)
{
struct io_stats *io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
struct io_stats *io_slot = &uid_entry->io[slot];
io_curr->read_bytes += task->ioac.read_bytes;
io_curr->write_bytes += compute_write_bytes(task);
io_curr->rchar += task->ioac.rchar;
io_curr->wchar += task->ioac.wchar;
io_curr->fsync += task->ioac.syscfs;
io_slot->read_bytes += task->ioac.read_bytes;
io_slot->write_bytes += compute_write_bytes(task);
io_slot->rchar += task->ioac.rchar;
io_slot->wchar += task->ioac.wchar;
io_slot->fsync += task->ioac.syscfs;
}
static void clean_uid_io_last_stats(struct uid_entry *uid_entry,
struct task_struct *task)
static void compute_uid_io_bucket_stats(struct io_stats *io_bucket,
struct io_stats *io_curr,
struct io_stats *io_last,
struct io_stats *io_dead)
{
struct io_stats *io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
io_bucket->read_bytes += io_curr->read_bytes + io_dead->read_bytes -
io_last->read_bytes;
io_bucket->write_bytes += io_curr->write_bytes + io_dead->write_bytes -
io_last->write_bytes;
io_bucket->rchar += io_curr->rchar + io_dead->rchar - io_last->rchar;
io_bucket->wchar += io_curr->wchar + io_dead->wchar - io_last->wchar;
io_bucket->fsync += io_curr->fsync + io_dead->fsync - io_last->fsync;
io_last->read_bytes -= task->ioac.read_bytes;
io_last->write_bytes -= compute_write_bytes(task);
io_last->rchar -= task->ioac.rchar;
io_last->wchar -= task->ioac.wchar;
io_last->fsync -= task->ioac.syscfs;
io_last->read_bytes = io_curr->read_bytes;
io_last->write_bytes = io_curr->write_bytes;
io_last->rchar = io_curr->rchar;
io_last->wchar = io_curr->wchar;
io_last->fsync = io_curr->fsync;
memset(io_dead, 0, sizeof(struct io_stats));
}
static void update_io_stats_all_locked(void)
{
struct uid_entry *uid_entry;
struct task_struct *task, *temp;
struct io_stats *io_bucket, *io_curr, *io_last;
struct user_namespace *user_ns = current_user_ns();
unsigned long bkt;
uid_t uid;
@@ -257,70 +267,38 @@ static void update_io_stats_all_locked(void)
uid_entry = find_or_register_uid(uid);
if (!uid_entry)
continue;
add_uid_io_curr_stats(uid_entry, task);
add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
} while_each_thread(temp, task);
rcu_read_unlock();
hash_for_each(hash_table, bkt, uid_entry, hash) {
io_bucket = &uid_entry->io[uid_entry->state];
io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
io_bucket->read_bytes +=
io_curr->read_bytes - io_last->read_bytes;
io_bucket->write_bytes +=
io_curr->write_bytes - io_last->write_bytes;
io_bucket->rchar += io_curr->rchar - io_last->rchar;
io_bucket->wchar += io_curr->wchar - io_last->wchar;
io_bucket->fsync += io_curr->fsync - io_last->fsync;
io_last->read_bytes = io_curr->read_bytes;
io_last->write_bytes = io_curr->write_bytes;
io_last->rchar = io_curr->rchar;
io_last->wchar = io_curr->wchar;
io_last->fsync = io_curr->fsync;
compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
&uid_entry->io[UID_STATE_TOTAL_CURR],
&uid_entry->io[UID_STATE_TOTAL_LAST],
&uid_entry->io[UID_STATE_DEAD_TASKS]);
}
}
static void update_io_stats_uid_locked(uid_t target_uid)
static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
{
struct uid_entry *uid_entry;
struct task_struct *task, *temp;
struct io_stats *io_bucket, *io_curr, *io_last;
struct user_namespace *user_ns = current_user_ns();
uid_entry = find_or_register_uid(target_uid);
if (!uid_entry)
return;
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
rcu_read_lock();
do_each_thread(temp, task) {
if (from_kuid_munged(user_ns, task_uid(task)) != target_uid)
if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid)
continue;
add_uid_io_curr_stats(uid_entry, task);
add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
} while_each_thread(temp, task);
rcu_read_unlock();
io_bucket = &uid_entry->io[uid_entry->state];
io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
io_bucket->read_bytes +=
io_curr->read_bytes - io_last->read_bytes;
io_bucket->write_bytes +=
io_curr->write_bytes - io_last->write_bytes;
io_bucket->rchar += io_curr->rchar - io_last->rchar;
io_bucket->wchar += io_curr->wchar - io_last->wchar;
io_bucket->fsync += io_curr->fsync - io_last->fsync;
io_last->read_bytes = io_curr->read_bytes;
io_last->write_bytes = io_curr->write_bytes;
io_last->rchar = io_curr->rchar;
io_last->wchar = io_curr->wchar;
io_last->fsync = io_curr->fsync;
compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
&uid_entry->io[UID_STATE_TOTAL_CURR],
&uid_entry->io[UID_STATE_TOTAL_LAST],
&uid_entry->io[UID_STATE_DEAD_TASKS]);
}
static int uid_io_show(struct seq_file *m, void *v)
@@ -405,7 +383,7 @@ static ssize_t uid_procstat_write(struct file *file,
return count;
}
update_io_stats_uid_locked(uid);
update_io_stats_uid_locked(uid_entry);
uid_entry->state = state;
@@ -443,8 +421,7 @@ static int process_notifier(struct notifier_block *self,
uid_entry->utime += utime;
uid_entry->stime += stime;
update_io_stats_uid_locked(uid);
clean_uid_io_last_stats(uid_entry, task);
add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);
exit:
rt_mutex_unlock(&uid_lock);
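
The uid_sys_stats refactor above folds the duplicated bucket arithmetic into compute_uid_io_bucket_stats() and adds a DEAD_TASKS slot so I/O from exited tasks is accumulated rather than subtracted out of the last snapshot. A rough user-space model of that bookkeeping, with made-up byte counts:

#include <stdio.h>
#include <string.h>

struct io_stats { unsigned long long read_bytes, write_bytes; };

/* bucket += (current snapshot + dead-task totals) - last snapshot,
 * then roll the snapshot forward and clear the dead-task slot. */
static void fold_bucket(struct io_stats *bucket, struct io_stats *curr,
                        struct io_stats *last, struct io_stats *dead)
{
    bucket->read_bytes  += curr->read_bytes  + dead->read_bytes  - last->read_bytes;
    bucket->write_bytes += curr->write_bytes + dead->write_bytes - last->write_bytes;
    *last = *curr;
    memset(dead, 0, sizeof(*dead));
}

int main(void)
{
    struct io_stats bucket = {0, 0}, last = {100, 50}, dead = {30, 10};
    struct io_stats curr = {150, 70};  /* live tasks re-summed this pass */

    fold_bucket(&bucket, &curr, &last, &dead);
    printf("bucket: %llu read, %llu written\n",
           bucket.read_bytes, bucket.write_bytes);  /* 80 read, 30 written */
    return 0;
}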

@@ -1077,7 +1077,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
* are "42101001.sb" or "42101002.sb"
*/
sprintf(stir421x_fw_name, "4210%4X.sb",
self->usbdev->descriptor.bcdDevice);
le16_to_cpu(self->usbdev->descriptor.bcdDevice));
ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev);
if (ret < 0)
return ret;
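
The one-line irda-usb change matters because USB descriptor fields are little-endian on the wire, so printing the raw u16 only happens to work on little-endian hosts. A hedged stand-alone illustration; the descriptor value is invented:

#include <stdint.h>
#include <stdio.h>

/* Descriptor bytes as they arrive over USB: little-endian 0x1001. */
static const uint8_t bcd_device_raw[2] = { 0x01, 0x10 };

int main(void)
{
    /* What le16_to_cpu() yields on any host. */
    uint16_t cpu_val = (uint16_t)(bcd_device_raw[0] | (bcd_device_raw[1] << 8));
    /* What a big-endian host sees if it uses the raw field directly. */
    uint16_t be_misread = (uint16_t)((bcd_device_raw[0] << 8) | bcd_device_raw[1]);

    char good[16], bad[16];
    snprintf(good, sizeof(good), "4210%04X.sb", cpu_val);
    snprintf(bad,  sizeof(bad),  "4210%04X.sb", be_misread);
    printf("converted: %s  raw-on-big-endian: %s\n", good, bad);
    return 0;
}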

@@ -754,6 +754,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */

@@ -37,6 +37,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
{ USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
{ USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */
{ USB_DEVICE(0x1eda, 0x2315) }, /* AirTies */
{ USB_DEVICE(0x0cf3, 0x7015),
.driver_info = AR9287_USB }, /* Atheros */
@@ -1216,6 +1217,9 @@ static int send_eject_command(struct usb_interface *interface)
u8 bulk_out_ep;
int r;
if (iface_desc->desc.bNumEndpoints < 2)
return -ENODEV;
/* Find bulk out endpoint */
for (r = 1; r >= 0; r--) {
endpoint = &iface_desc->endpoint[r].desc;

@@ -2135,9 +2135,7 @@ void cw1200_mcast_timeout(unsigned long arg)
int cw1200_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn,
u8 buf_size, bool amsdu)
struct ieee80211_ampdu_params *params)
{
/* Aggregation is implemented fully in firmware,
* including block ack negotiation. Do not allow

@@ -5982,12 +5982,14 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 * ssn,
u8 buf_size, bool amsdu)
struct ieee80211_ampdu_params *params)
{
struct il_priv *il = hw->priv;
int ret = -EINVAL;
struct ieee80211_sta *sta = params->sta;
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);

@@ -729,12 +729,15 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn,
u8 buf_size, bool amsdu)
struct ieee80211_ampdu_params *params)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int ret = -EINVAL;
struct ieee80211_sta *sta = params->sta;
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
u8 buf_size = params->buf_size;
struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",

@@ -826,13 +826,16 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid,
u16 *ssn, u8 buf_size, bool amsdu)
struct ieee80211_ampdu_params *params)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
bool tx_agg_ref = false;
struct ieee80211_sta *sta = params->sta;
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
u8 buf_size = params->buf_size;
IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
sta->addr, tid, action);

@@ -947,6 +947,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
if (card && card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
PCI_DMA_TODEVICE);
dev_kfree_skb_any(card->cmd_buf);
}
return 0;
}
@@ -1513,6 +1514,11 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
return -1;
card->cmd_buf = skb;
/*
* Need to keep a reference, since core driver might free up this
* buffer before we've unmapped it.
*/
skb_get(skb);
/* To send a command, the driver will:
1. Write the 64bit physical address of the data buffer to
@@ -1610,6 +1616,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
PCI_DMA_TODEVICE);
dev_kfree_skb_any(card->cmd_buf);
card->cmd_buf = NULL;
}

@@ -359,6 +359,107 @@ bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw)
return rtl8821ae_phy_rf6052_config(hw);
}
static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 tmp;
switch (rtlhal->rfe_type) {
case 3:
rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337770);
rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337770);
rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
break;
case 4:
rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x001);
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x001);
break;
case 5:
rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x77);
rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp & ~0x1);
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
break;
case 1:
if (rtlpriv->btcoexist.bt_coexistence) {
rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x777777);
rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
0x77777777);
rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
break;
}
case 0:
case 2:
default:
rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
break;
}
}
static void _rtl8812ae_phy_set_rfe_reg_5g(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 tmp;
switch (rtlhal->rfe_type) {
case 0:
rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337717);
rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337717);
rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
break;
case 1:
if (rtlpriv->btcoexist.bt_coexistence) {
rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x337717);
rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
0x77337717);
rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
} else {
rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
0x77337717);
rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
0x77337717);
rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
}
break;
case 3:
rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337717);
rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337717);
rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
break;
case 5:
rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x33);
rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp | 0x1);
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
break;
case 2:
case 4:
default:
rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777);
rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
break;
}
}
u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
u8 rf_path)
{
@@ -553,14 +654,9 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
/* 0x82C[1:0] = 2b'00 */
rtl_set_bbreg(hw, 0x82c, 0x3, 0);
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
0x77777777);
rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
0x77777777);
rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000);
rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000);
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
_rtl8812ae_phy_set_rfe_reg_24g(hw);
rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1);
rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1);
@@ -615,14 +711,8 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
/* 0x82C[1:0] = 2'b00 */
rtl_set_bbreg(hw, 0x82c, 0x3, 1);
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
0x77337777);
rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
0x77337777);
rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010);
rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010);
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
_rtl8812ae_phy_set_rfe_reg_5g(hw);
rtl_set_bbreg(hw, RTXPATH, 0xf0, 0);
rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf);

@@ -2424,6 +2424,7 @@
#define BMASKH4BITS 0xf0000000
#define BMASKOFDM_D 0xffc00000
#define BMASKCCK 0x3f3f3f3f
#define BMASKRFEINV 0x3ff00000
#define BRFREGOFFSETMASK 0xfffff

@@ -206,5 +206,33 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
mbox->sc_pwd_len,
mbox->sc_pwd);
if (vector & RX_BA_WIN_SIZE_CHANGE_EVENT_ID) {
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
struct ieee80211_sta *sta;
u8 link_id = mbox->rx_ba_link_id;
u8 win_size = mbox->rx_ba_win_size;
const u8 *addr;
wlvif = wl->links[link_id].wlvif;
vif = wl12xx_wlvif_to_vif(wlvif);
/* Update RX aggregation window size and call
* MAC routine to stop active RX aggregations for this link
*/
if (wlvif->bss_type != BSS_TYPE_AP_BSS)
addr = vif->bss_conf.bssid;
else
addr = wl->links[link_id].addr;
sta = ieee80211_find_sta(vif, addr);
if (sta) {
sta->max_rx_aggregation_subframes = win_size;
ieee80211_stop_rx_ba_session(vif,
wl->links[link_id].ba_bitmap,
addr);
}
}
return 0;
}

@@ -38,6 +38,7 @@ enum {
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(18),
DFS_CHANNELS_CONFIG_COMPLETE_EVENT = BIT(19),
PERIODIC_SCAN_REPORT_EVENT_ID = BIT(20),
RX_BA_WIN_SIZE_CHANGE_EVENT_ID = BIT(21),
SMART_CONFIG_SYNC_EVENT_ID = BIT(22),
SMART_CONFIG_DECODE_EVENT_ID = BIT(23),
TIME_SYNC_EVENT_ID = BIT(24),

@@ -1029,7 +1029,8 @@ static int wl18xx_boot(struct wl1271 *wl)
DFS_CHANNELS_CONFIG_COMPLETE_EVENT |
SMART_CONFIG_SYNC_EVENT_ID |
SMART_CONFIG_DECODE_EVENT_ID |
TIME_SYNC_EVENT_ID;
TIME_SYNC_EVENT_ID |
RX_BA_WIN_SIZE_CHANGE_EVENT_ID;
wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;

@@ -1419,7 +1419,8 @@ out:
/* setup BA session receiver setting in the FW. */
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
u16 ssn, bool enable, u8 peer_hlid)
u16 ssn, bool enable, u8 peer_hlid,
u8 win_size)
{
struct wl1271_acx_ba_receiver_setup *acx;
int ret;
@@ -1435,7 +1436,7 @@ int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
acx->hlid = peer_hlid;
acx->tid = tid_index;
acx->enable = enable;
acx->win_size = wl->conf.ht.rx_ba_win_size;
acx->win_size = win_size;
acx->ssn = ssn;
ret = wlcore_cmd_configure_failsafe(wl, ACX_BA_SESSION_RX_SETUP, acx,

@@ -1112,7 +1112,8 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
struct wl12xx_vif *wlvif);
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
u16 ssn, bool enable, u8 peer_hlid);
u16 ssn, bool enable, u8 peer_hlid,
u8 win_size);
int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u64 *mactime);
int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,

@@ -5328,7 +5328,9 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
}
ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
hlid);
hlid,
params->buf_size);
if (!ret) {
*ba_bitmap |= BIT(tid);
wl->ba_rx_session_count++;
@@ -5349,7 +5351,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
}
ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
hlid);
hlid, 0);
if (!ret) {
*ba_bitmap &= ~BIT(tid);
wl->ba_rx_session_count--;

@@ -260,7 +260,7 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
if (!parser->range || parser->range + parser->np > parser->end)
return NULL;
range->pci_space = parser->range[0];
range->pci_space = be32_to_cpup(parser->range);
range->flags = of_bus_pci_get_flags(parser->range);
range->pci_addr = of_read_number(parser->range + 1, ns);
range->cpu_addr = of_translate_address(parser->node,

@@ -973,15 +973,19 @@ void pci_remove_legacy_files(struct pci_bus *b)
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
enum pci_mmap_api mmap_api)
{
unsigned long nr, start, size, pci_start;
unsigned long nr, start, size;
resource_size_t pci_start = 0, pci_end;
if (pci_resource_len(pdev, resno) == 0)
return 0;
nr = vma_pages(vma);
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
if (mmap_api == PCI_MMAP_PROCFS) {
pci_resource_to_user(pdev, resno, &pdev->resource[resno],
&pci_start, &pci_end);
pci_start >>= PAGE_SHIFT;
}
if (start >= pci_start && start < pci_start + size &&
start + nr <= pci_start + size)
return 1;

@@ -1732,8 +1732,8 @@ static void pci_pme_list_scan(struct work_struct *work)
}
}
if (!list_empty(&pci_pme_list))
schedule_delayed_work(&pci_pme_work,
msecs_to_jiffies(PME_TIMEOUT));
queue_delayed_work(system_freezable_wq, &pci_pme_work,
msecs_to_jiffies(PME_TIMEOUT));
mutex_unlock(&pci_pme_list_mutex);
}
@@ -1798,8 +1798,9 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
mutex_lock(&pci_pme_list_mutex);
list_add(&pme_dev->list, &pci_pme_list);
if (list_is_singular(&pci_pme_list))
schedule_delayed_work(&pci_pme_work,
msecs_to_jiffies(PME_TIMEOUT));
queue_delayed_work(system_freezable_wq,
&pci_pme_work,
msecs_to_jiffies(PME_TIMEOUT));
mutex_unlock(&pci_pme_list_mutex);
} else {
mutex_lock(&pci_pme_list_mutex);

@@ -311,8 +311,7 @@ static int tps_65023_probe(struct i2c_client *client,
/* Enable setting output voltage by I2C */
regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
TPS65023_REG_CTRL2_CORE_ADJ,
TPS65023_REG_CTRL2_CORE_ADJ);
TPS65023_REG_CTRL2_CORE_ADJ, 0);
return 0;
}
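
The tps65023 change keeps the same mask but passes 0 as the value, i.e. it now clears CORE_ADJ instead of setting it. A tiny sketch of the read-modify-write that regmap_update_bits() boils down to; the register contents and bit position are hypothetical, not taken from the datasheet:

#include <stdint.h>
#include <stdio.h>

#define CORE_ADJ_MASK 0x40  /* assumed bit position, for illustration only */

/* new = (old & ~mask) | (val & mask) */
static uint8_t update_bits(uint8_t old, uint8_t mask, uint8_t val)
{
    return (uint8_t)((old & ~mask) | (val & mask));
}

int main(void)
{
    uint8_t reg = 0x1f;  /* hypothetical CON_CTRL2 content */

    printf("set bit:   0x%02x\n", update_bits(reg, CORE_ADJ_MASK, CORE_ADJ_MASK)); /* 0x5f */
    printf("clear bit: 0x%02x\n", update_bits(reg, CORE_ADJ_MASK, 0));             /* 0x1f */
    return 0;
}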

@@ -429,6 +429,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
global_page_state(NR_FILE_PAGES) + zcache_pages())
other_file = global_page_state(NR_FILE_PAGES) + zcache_pages() -
global_page_state(NR_SHMEM) -
global_page_state(NR_UNEVICTABLE) -
total_swapcache_pages();
else
other_file = 0;

@@ -610,7 +610,7 @@ static void jr3_pci_poll_dev(unsigned long data)
s = &dev->subdevices[i];
spriv = s->private;
if (now > spriv->next_time_min) {
if (time_after_eq(now, spriv->next_time_min)) {
struct jr3_pci_poll_delay sub_delay;
sub_delay = jr3_pci_poll_subdevice(s);
@@ -726,11 +726,12 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
s->insn_read = jr3_pci_ai_insn_read;
spriv = jr3_pci_alloc_spriv(dev, s);
if (spriv) {
/* Channel specific range and maxdata */
s->range_table_list = spriv->range_table_list;
s->maxdata_list = spriv->maxdata_list;
}
if (!spriv)
return -ENOMEM;
/* Channel specific range and maxdata */
s->range_table_list = spriv->range_table_list;
s->maxdata_list = spriv->maxdata_list;
}
/* Reset DSP card */
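
The jr3_pci fix swaps a plain `now > spriv->next_time_min` for time_after_eq() because jiffies wrap; the plain comparison misfires across the wrap point. A self-contained model of the signed-difference trick, with values chosen to sit around a 32-bit wrap:

#include <stdint.h>
#include <stdio.h>

/* Same idea as the kernel's time_after_eq(): compare via signed difference
 * so the result stays correct across counter wraparound. */
static int after_eq(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) >= 0;
}

int main(void)
{
    uint32_t next = 0xfffffff0u;  /* deadline just before the wrap */
    uint32_t now  = 0x00000010u;  /* counter has wrapped past it */

    printf("naive now > next : %d\n", now > next);           /* 0: looks not due */
    printf("after_eq(now,next): %d\n", after_eq(now, next)); /* 1: actually due */
    return 0;
}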

@@ -670,14 +670,14 @@ static int __init gdm_usb_mux_init(void)
static void __exit gdm_usb_mux_exit(void)
{
unregister_lte_tty_driver();
if (mux_rx_wq) {
flush_workqueue(mux_rx_wq);
destroy_workqueue(mux_rx_wq);
}
usb_deregister(&gdm_mux_driver);
unregister_lte_tty_driver();
}
module_init(gdm_usb_mux_init);

@@ -97,8 +97,9 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
switch (variable) {
case HW_VAR_BSSID:
rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]);
rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]);
/* BSSIDR 2 byte alignment */
rtl92e_writew(dev, BSSIDR, *(u16 *)val);
rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2));
break;
case HW_VAR_MEDIA_STATUS:
@@ -626,7 +627,7 @@ void rtl92e_get_eeprom_size(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
curCR = rtl92e_readl(dev, EPROM_CMD);
curCR = rtl92e_readw(dev, EPROM_CMD);
RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
curCR);
priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
@@ -963,8 +964,8 @@ static void _rtl92e_net_update(struct net_device *dev)
rtl92e_config_rate(dev, &rate_config);
priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
priv->basic_rate = rate_config &= 0x15f;
rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]);
rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]);
rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
rtl92e_writew(dev, ATIMWND, 2);

@@ -50,15 +50,25 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
u16 index, u16 length, u8 *buffer)
{
int status = 0;
u8 *usb_buffer;
if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
return STATUS_FAILURE;
mutex_lock(&priv->usb_lock);
usb_buffer = kmemdup(buffer, length, GFP_KERNEL);
if (!usb_buffer) {
mutex_unlock(&priv->usb_lock);
return -ENOMEM;
}
status = usb_control_msg(priv->usb,
usb_sndctrlpipe(priv->usb, 0), request, 0x40, value,
index, buffer, length, USB_CTL_WAIT);
usb_sndctrlpipe(priv->usb, 0),
request, 0x40, value,
index, usb_buffer, length, USB_CTL_WAIT);
kfree(usb_buffer);
mutex_unlock(&priv->usb_lock);
@@ -78,15 +88,28 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
u16 index, u16 length, u8 *buffer)
{
int status;
u8 *usb_buffer;
if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
return STATUS_FAILURE;
mutex_lock(&priv->usb_lock);
usb_buffer = kmalloc(length, GFP_KERNEL);
if (!usb_buffer) {
mutex_unlock(&priv->usb_lock);
return -ENOMEM;
}
status = usb_control_msg(priv->usb,
usb_rcvctrlpipe(priv->usb, 0), request, 0xc0, value,
index, buffer, length, USB_CTL_WAIT);
usb_rcvctrlpipe(priv->usb, 0),
request, 0xc0, value,
index, usb_buffer, length, USB_CTL_WAIT);
if (status == length)
memcpy(buffer, usb_buffer, length);
kfree(usb_buffer);
mutex_unlock(&priv->usb_lock);
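
The vt6656 hunks stop passing the caller's buffer straight to usb_control_msg() and instead bounce it through a freshly allocated copy (kmemdup()/kmalloc()), since callers may hand in stack or otherwise DMA-unsafe memory. A user-space approximation of that bounce-buffer pattern; do_transfer() is a stand-in, not a driver function:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Stand-in for the hardware transfer; just reports the length. */
static int do_transfer(void *buf, size_t len)
{
    (void)buf;
    return (int)len;
}

/* Copy the caller's data into heap memory, run the transfer against the
 * copy, and free it - mirroring kmemdup() + usb_control_msg() + kfree(). */
static int control_out(const void *data, size_t len)
{
    void *bounce = malloc(len);
    int status;

    if (!bounce)
        return -1;
    memcpy(bounce, data, len);  /* kmemdup() equivalent */
    status = do_transfer(bounce, len);
    free(bounce);
    return status;
}

int main(void)
{
    char stack_buf[8] = "abcdefg";
    printf("transferred %d bytes\n", control_out(stack_buf, sizeof(stack_buf)));
    return 0;
}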

@@ -4821,6 +4821,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
continue;
}
atomic_set(&sess->session_reinstatement, 1);
atomic_set(&sess->session_fall_back_to_erl0, 1);
spin_unlock(&sess->conn_lock);
list_move_tail(&se_sess->sess_list, &free_list);

@@ -725,11 +725,8 @@ static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
if (iscsit_get_tpg(tpg) < 0)
return -EINVAL;
/*
* iscsit_tpg_set_initiator_node_queue_depth() assumes force=1
*/
ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
config_item_name(acl_ci), cmdsn_depth, 1);
ret = core_tpg_set_initiator_node_queue_depth(se_nacl, cmdsn_depth);
pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
"InitiatorName: %s\n", config_item_name(wwn_ci),
@@ -1593,42 +1590,31 @@ static int lio_tpg_check_prot_fabric_only(
}
/*
* Called with spin_lock_irq(struct se_portal_group->session_lock) held
* or not held.
*
* Also, this function calls iscsit_inc_session_usage_count() on the
* This function calls iscsit_inc_session_usage_count() on the
* struct iscsi_session in question.
*/
static int lio_tpg_shutdown_session(struct se_session *se_sess)
{
struct iscsi_session *sess = se_sess->fabric_sess_ptr;
struct se_portal_group *se_tpg = se_sess->se_tpg;
bool local_lock = false;
if (!spin_is_locked(&se_tpg->session_lock)) {
spin_lock_irq(&se_tpg->session_lock);
local_lock = true;
}
struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg;
spin_lock_bh(&se_tpg->session_lock);
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
if (local_lock)
spin_unlock_irq(&sess->conn_lock);
spin_unlock_bh(&se_tpg->session_lock);
return 0;
}
atomic_set(&sess->session_reinstatement, 1);
atomic_set(&sess->session_fall_back_to_erl0, 1);
spin_unlock(&sess->conn_lock);
iscsit_stop_time2retain_timer(sess);
spin_unlock_irq(&se_tpg->session_lock);
spin_unlock_bh(&se_tpg->session_lock);
iscsit_stop_session(sess, 1, 1);
if (!local_lock)
spin_lock_irq(&se_tpg->session_lock);
return 1;
}

@@ -195,6 +195,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
initiatorname_param->value) &&
(sess_p->sess_ops->SessionType == sessiontype))) {
atomic_set(&sess_p->session_reinstatement, 1);
atomic_set(&sess_p->session_fall_back_to_erl0, 1);
spin_unlock(&sess_p->conn_lock);
iscsit_inc_session_usage_count(sess_p);
iscsit_stop_time2retain_timer(sess_p);

Some files were not shown because too many files have changed in this diff.