Merge android-4.4.114 (fe09418) into msm-4.4
* refs/heads/tmp-fe09418:
  Linux 4.4.114
  nfsd: auth: Fix gid sorting when rootsquash enabled
  net: tcp: close sock if net namespace is exiting
  flow_dissector: properly cap thoff field
  ipv4: Make neigh lookup keys for loopback/point-to-point devices be INADDR_ANY
  net: Allow neigh contructor functions ability to modify the primary_key
  vmxnet3: repair memory leak
  sctp: return error if the asoc has been peeled off in sctp_wait_for_sndbuf
  sctp: do not allow the v4 socket to bind a v4mapped v6 address
  r8169: fix memory corruption on retrieval of hardware statistics.
  pppoe: take ->needed_headroom of lower device into account on xmit
  net: qdisc_pkt_len_init() should be more robust
  tcp: __tcp_hdrlen() helper
  net: igmp: fix source address check for IGMPv3 reports
  lan78xx: Fix failure in USB Full Speed
  ipv6: ip6_make_skb() needs to clear cork.base.dst
  ipv6: fix udpv6 sendmsg crash caused by too small MTU
  ipv6: Fix getsockopt() for sockets with default IPV6_AUTOFLOWLABEL
  dccp: don't restart ccid2_hc_tx_rto_expire() if sk in closed state
  hrtimer: Reset hrtimer cpu base proper on CPU hotplug
  x86/microcode/intel: Extend BDW late-loading further with LLC size check
  eventpoll.h: add missing epoll event masks
  vsyscall: Fix permissions for emulate mode with KAISER/PTI
  um: link vmlinux with -no-pie
  usbip: prevent leaking socket pointer address in messages
  usbip: fix stub_rx: harden CMD_SUBMIT path to handle malicious input
  usbip: fix stub_rx: get_pipe() to validate endpoint number
  usb: usbip: Fix possible deadlocks reported by lockdep
  Input: trackpoint - force 3 buttons if 0 button is reported
  Revert "module: Add retpoline tag to VERMAGIC"
  scsi: libiscsi: fix shifting of DID_REQUEUE host byte
  fs/fcntl: f_setown, avoid undefined behaviour
  reiserfs: Don't clear SGID when inheriting ACLs
  reiserfs: don't preallocate blocks for extended attributes
  reiserfs: fix race in prealloc discard
  ext2: Don't clear SGID when inheriting ACLs
  netfilter: xt_osf: Add missing permission checks
  netfilter: nfnetlink_cthelper: Add missing permission checks
  netfilter: fix IS_ERR_VALUE usage
  netfilter: use fwmark_reflect in nf_send_reset
  netfilter: nf_conntrack_sip: extend request line validation
  netfilter: restart search if moved to other chain
  netfilter: nfnetlink_queue: reject verdict request from different portid
  netfilter: nf_ct_expect: remove the redundant slash when policy name is empty
  netfilter: nf_dup_ipv6: set again FLOWI_FLAG_KNOWN_NH at flowi6_flags
  netfilter: arp_tables: fix invoking 32bit "iptable -P INPUT ACCEPT" failed in 64bit kernel
  netfilter: x_tables: speed up jump target validation
  ACPICA: Namespace: fix operand cache leak
  ACPI / scan: Prefer devices without _HID/_CID for _ADR matching
  ACPI / processor: Avoid reserving IO regions too early
  x86/ioapic: Fix incorrect pointers in ioapic_setup_resources()
  ipc: msg, make msgrcv work with LONG_MIN
  mm, page_alloc: fix potential false positive in __zone_watermark_ok
  cma: fix calculation of aligned offset
  hwpoison, memcg: forcibly uncharge LRU pages
  mm/mmap.c: do not blow on PROT_NONE MAP_FIXED holes in the stack
  fs/select: add vmalloc fallback for select(2)
  mmc: sdhci-of-esdhc: add/remove some quirks according to vendor version
  PCI: layerscape: Fix MSG TLP drop setting
  PCI: layerscape: Add "fsl,ls2085a-pcie" compatible ID
  drivers: base: cacheinfo: fix boot error message when acpi is enabled
  drivers: base: cacheinfo: fix x86 with CONFIG_OF enabled
  Prevent timer value 0 for MWAITX
  timers: Plug locking race vs. timer migration
  time: Avoid undefined behaviour in ktime_add_safe()
  PM / sleep: declare __tracedata symbols as char[] rather than char
  can: af_can: canfd_rcv(): replace WARN_ONCE by pr_warn_once
  can: af_can: can_rcv(): replace WARN_ONCE by pr_warn_once
  sched/deadline: Use the revised wakeup rule for suspending constrained dl tasks
  x86/retpoline: Fill RSB on context switch for affected CPUs
  x86/cpu/intel: Introduce macros for Intel family numbers
  x86/microcode/intel: Fix BDW late-loading revision check
  usbip: Fix potential format overflow in userspace tools
  usbip: Fix implicit fallthrough warning
  usbip: prevent vhci_hcd driver from leaking a socket pointer address
  x86/asm/32: Make sync_core() handle missing CPUID on all 32-bit kernels
  ANDROID: sched: EAS: check energy_aware() before calling select_energy_cpu_brute() in up-migrate path
  UPSTREAM: eventpoll.h: add missing epoll event masks
  ANDROID: xattr: Pass EOPNOTSUPP to permission2

Conflicts:
	kernel/sched/fair.c

Change-Id: I15005cb3bc039f4361d25ed2e22f8175b3d7ca96
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
commit 38cacfd106
93 changed files with 919 additions and 365 deletions

Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 113
+SUBLEVEL = 114
 EXTRAVERSION =
 NAME = Blurry Fish Butt
@@ -117,7 +117,7 @@ archheaders:
 archprepare: include/generated/user_constants.h
 
 LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
-LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib
+LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie)
 
 CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
 	$(call cc-option, -fno-stack-protector,) \
@@ -46,6 +46,7 @@ static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
 #else
 	EMULATE;
 #endif
+unsigned long vsyscall_pgprot = __PAGE_KERNEL_VSYSCALL;
 
 static int __init vsyscall_setup(char *str)
 {

@@ -336,11 +337,11 @@ void __init map_vsyscall(void)
 	extern char __vsyscall_page;
 	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
 
+	if (vsyscall_mode != NATIVE)
+		vsyscall_pgprot = __PAGE_KERNEL_VVAR;
 	if (vsyscall_mode != NONE)
 		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
-			     vsyscall_mode == NATIVE
-			     ? PAGE_KERNEL_VSYSCALL
-			     : PAGE_KERNEL_VVAR);
+			     __pgprot(vsyscall_pgprot));
 
 	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
 		     (unsigned long)VSYSCALL_ADDR);
@@ -199,6 +199,7 @@
 #define X86_FEATURE_HWP_EPP	( 7*32+13) /* Intel HWP_EPP */
 #define X86_FEATURE_HWP_PKG_REQ	( 7*32+14) /* Intel HWP_PKG_REQ */
 #define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* Fill RSB on context switches */
 
 #define X86_FEATURE_RETPOLINE	( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
arch/x86/include/asm/intel-family.h (new file)
@@ -0,0 +1,68 @@
+#ifndef _ASM_X86_INTEL_FAMILY_H
+#define _ASM_X86_INTEL_FAMILY_H
+
+/*
+ * "Big Core" Processors (Branded as Core, Xeon, etc...)
+ *
+ * The "_X" parts are generally the EP and EX Xeons, or the
+ * "Extreme" ones, like Broadwell-E.
+ *
+ * Things ending in "2" are usually because we have no better
+ * name for them.  There's no processor called "WESTMERE2".
+ */
+
+#define INTEL_FAM6_CORE_YONAH		0x0E
+#define INTEL_FAM6_CORE2_MEROM		0x0F
+#define INTEL_FAM6_CORE2_MEROM_L	0x16
+#define INTEL_FAM6_CORE2_PENRYN		0x17
+#define INTEL_FAM6_CORE2_DUNNINGTON	0x1D
+
+#define INTEL_FAM6_NEHALEM		0x1E
+#define INTEL_FAM6_NEHALEM_EP		0x1A
+#define INTEL_FAM6_NEHALEM_EX		0x2E
+#define INTEL_FAM6_WESTMERE		0x25
+#define INTEL_FAM6_WESTMERE2		0x1F
+#define INTEL_FAM6_WESTMERE_EP		0x2C
+#define INTEL_FAM6_WESTMERE_EX		0x2F
+
+#define INTEL_FAM6_SANDYBRIDGE		0x2A
+#define INTEL_FAM6_SANDYBRIDGE_X	0x2D
+#define INTEL_FAM6_IVYBRIDGE		0x3A
+#define INTEL_FAM6_IVYBRIDGE_X		0x3E
+
+#define INTEL_FAM6_HASWELL_CORE		0x3C
+#define INTEL_FAM6_HASWELL_X		0x3F
+#define INTEL_FAM6_HASWELL_ULT		0x45
+#define INTEL_FAM6_HASWELL_GT3E		0x46
+
+#define INTEL_FAM6_BROADWELL_CORE	0x3D
+#define INTEL_FAM6_BROADWELL_XEON_D	0x56
+#define INTEL_FAM6_BROADWELL_GT3E	0x47
+#define INTEL_FAM6_BROADWELL_X		0x4F
+
+#define INTEL_FAM6_SKYLAKE_MOBILE	0x4E
+#define INTEL_FAM6_SKYLAKE_DESKTOP	0x5E
+#define INTEL_FAM6_SKYLAKE_X		0x55
+#define INTEL_FAM6_KABYLAKE_MOBILE	0x8E
+#define INTEL_FAM6_KABYLAKE_DESKTOP	0x9E
+
+/* "Small Core" Processors (Atom) */
+
+#define INTEL_FAM6_ATOM_PINEVIEW	0x1C
+#define INTEL_FAM6_ATOM_LINCROFT	0x26
+#define INTEL_FAM6_ATOM_PENWELL		0x27
+#define INTEL_FAM6_ATOM_CLOVERVIEW	0x35
+#define INTEL_FAM6_ATOM_CEDARVIEW	0x36
+#define INTEL_FAM6_ATOM_SILVERMONT1	0x37 /* BayTrail/BYT / Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT2	0x4D /* Avaton/Rangely */
+#define INTEL_FAM6_ATOM_AIRMONT		0x4C /* CherryTrail / Braswell */
+#define INTEL_FAM6_ATOM_MERRIFIELD1	0x4A /* Tangier */
+#define INTEL_FAM6_ATOM_MERRIFIELD2	0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_GOLDMONT	0x5C
+#define INTEL_FAM6_ATOM_DENVERTON	0x5F /* Goldmont Microserver */
+
+/* Xeon Phi */
+
+#define INTEL_FAM6_XEON_PHI_KNL	0x57 /* Knights Landing */
+
+#endif /* _ASM_X86_INTEL_FAMILY_H */
@@ -574,7 +574,7 @@ static inline void sync_core(void)
 {
 	int tmp;
 
-#ifdef CONFIG_M486
+#ifdef CONFIG_X86_32
 	/*
 	 * Do a CPUID if available, otherwise do a jump.  The jump
 	 * can conveniently enough be the jump around CPUID.
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_SWITCH_TO_H
 #define _ASM_X86_SWITCH_TO_H
 
+#include <asm/nospec-branch.h>
+
 struct task_struct; /* one of the stranger aspects of C forward declarations */
 __visible struct task_struct *__switch_to(struct task_struct *prev,
 					  struct task_struct *next);

@@ -24,6 +26,23 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 #define __switch_canary_iparam
 #endif	/* CC_STACKPROTECTOR */
 
+#ifdef CONFIG_RETPOLINE
+/*
+ * When switching from a shallower to a deeper call stack
+ * the RSB may either underflow or use entries populated
+ * with userspace addresses. On CPUs where those concerns
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+#define __retpoline_fill_return_buffer					\
+	ALTERNATIVE("jmp 910f",						\
+		__stringify(__FILL_RETURN_BUFFER(%%ebx, RSB_CLEAR_LOOPS, %%esp)),\
+		X86_FEATURE_RSB_CTXSW)					\
+	"910:\n\t"
+#else
+#define __retpoline_fill_return_buffer
+#endif
+
 /*
  * Saving eflags is important. It switches not only IOPL between tasks,
  * it also protects other tasks from NT leaking through sysenter etc.

@@ -46,6 +65,7 @@ do {									\
 		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
 		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
 		     __switch_canary					\
+		     __retpoline_fill_return_buffer			\
 		     "jmp __switch_to\n"	/* regparm call  */	\
 		     "1:\t"						\
 		     "popl %%ebp\n\t"		/* restore EBP   */	\

@@ -100,6 +120,23 @@ do {									\
 #define __switch_canary_iparam
 #endif	/* CC_STACKPROTECTOR */
 
+#ifdef CONFIG_RETPOLINE
+/*
+ * When switching from a shallower to a deeper call stack
+ * the RSB may either underflow or use entries populated
+ * with userspace addresses. On CPUs where those concerns
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+#define __retpoline_fill_return_buffer					\
+	ALTERNATIVE("jmp 910f",						\
+		__stringify(__FILL_RETURN_BUFFER(%%r12, RSB_CLEAR_LOOPS, %%rsp)),\
+		X86_FEATURE_RSB_CTXSW)					\
+	"910:\n\t"
+#else
+#define __retpoline_fill_return_buffer
+#endif
+
 /*
  * There is no need to save or restore flags, because flags are always
  * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL

@@ -112,6 +149,7 @@ do {									\
 	     "call __switch_to\n\t"					\
 	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		\
 	     __switch_canary						\
+	     __retpoline_fill_return_buffer				\
 	     "movq %P[thread_info](%%rsi),%%r8\n\t"			\
 	     "movq %%rax,%%rdi\n\t"					\
 	     "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"		\
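The macro pair above relies on __FILL_RETURN_BUFFER planting benign entries in the CPU's Return Stack Buffer so that a speculative 'ret' after the switch cannot consume stale, attacker-controlled entries. A minimal userspace sketch of the stuffing pattern (not the kernel's exact macro; assumes x86-64 GCC/Clang and compilation with -mno-red-zone, since the call transiently writes below %rsp):

#include <stdio.h>

static void rsb_fill(void)
{
	unsigned long loops = 16;	/* kernel uses RSB_CLEAR_LOOPS = 32 */

	asm volatile(
		"1:\n\t"
		"call 2f\n\t"		/* plants one benign RSB entry       */
		"pause\n\t"		/* speculation trap: reached only by */
		"lfence\n\t"		/*   a speculative 'ret'             */
		"jmp 1b\n"
		"2:\n\t"
		"add $8, %%rsp\n\t"	/* drop the return address we pushed */
		"dec %0\n\t"
		"jnz 1b\n"
		: "+r" (loops) : : "memory", "cc");
}

int main(void)
{
	rsb_fill();
	puts("RSB overwritten with benign entries");
	return 0;
}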
@@ -13,6 +13,7 @@ extern void map_vsyscall(void);
  */
 extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
 extern bool vsyscall_enabled(void);
+extern unsigned long vsyscall_pgprot;
 #else
 static inline void map_vsyscall(void) {}
 static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
@@ -2592,8 +2592,8 @@ static struct resource * __init ioapic_setup_resources(void)
 		res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
 		mem += IOAPIC_RESOURCE_NAME_SIZE;
+		ioapics[i].iomem_res = &res[num];
 		num++;
-		ioapics[i].iomem_res = res;
 	}
 
 	ioapic_resources = res;
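The one-line fix above is a classic aliasing slip: every IO-APIC stored the array base 'res' instead of its own element, so all of them shared resource 0 while 'num' kept advancing. A standalone sketch of the difference (hypothetical miniature types, not the kernel structures):

#include <stdio.h>

struct resource { const char *name; };
struct io_apic { struct resource *iomem_res; };

int main(void)
{
	static const char *names[] = { "IOAPIC 0", "IOAPIC 1", "IOAPIC 2" };
	struct resource res[3];
	struct io_apic ioapics[3];
	int i, num = 0;

	for (i = 0; i < 3; i++) {
		res[num].name = names[i];
		ioapics[i].iomem_res = &res[num];	/* fixed: own element */
		/* buggy form: ioapics[i].iomem_res = res;  (always res[0]) */
		num++;
	}
	for (i = 0; i < 3; i++)
		printf("ioapic %d -> %s\n", i, ioapics[i].iomem_res->name);
	return 0;
}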
@@ -22,6 +22,7 @@
 #include <asm/alternative.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
+#include <asm/intel-family.h>
 
 static void __init spectre_v2_select_mitigation(void);
 

@@ -154,6 +155,23 @@ disable:
 	return SPECTRE_V2_CMD_NONE;
 }
 
+/* Check for Skylake-like CPUs (for RSB handling) */
+static bool __init is_skylake_era(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    boot_cpu_data.x86 == 6) {
+		switch (boot_cpu_data.x86_model) {
+		case INTEL_FAM6_SKYLAKE_MOBILE:
+		case INTEL_FAM6_SKYLAKE_DESKTOP:
+		case INTEL_FAM6_SKYLAKE_X:
+		case INTEL_FAM6_KABYLAKE_MOBILE:
+		case INTEL_FAM6_KABYLAKE_DESKTOP:
+			return true;
+		}
+	}
+	return false;
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();

@@ -212,6 +230,24 @@ retpoline_auto:
 
 	spectre_v2_enabled = mode;
 	pr_info("%s\n", spectre_v2_strings[mode]);
+
+	/*
+	 * If neither SMEP or KPTI are available, there is a risk of
+	 * hitting userspace addresses in the RSB after a context switch
+	 * from a shallow call stack to a deeper one. To prevent this fill
+	 * the entire RSB, even when using IBRS.
+	 *
+	 * Skylake era CPUs have a separate issue with *underflow* of the
+	 * RSB, when they will predict 'ret' targets from the generic BTB.
+	 * The proper mitigation for this is IBRS. If IBRS is not supported
+	 * or deactivated in favour of retpolines the RSB fill on context
+	 * switch is required.
+	 */
+	if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
+	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
+		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+		pr_info("Filling RSB on context switch\n");
+	}
 }
 
 #undef pr_fmt
|
@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
|
|||
ci_leaf_init(this_leaf++, &id4_regs);
|
||||
__cache_cpumap_setup(cpu, idx, &id4_regs);
|
||||
}
|
||||
this_cpu_ci->cpu_map_populated = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -39,6 +39,9 @@
 #include <asm/setup.h>
 #include <asm/msr.h>
 
+/* last level cache size per core */
+static int llc_size_per_core;
+
 static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
 static struct mc_saved_data {
 	unsigned int mc_saved_count;

@@ -996,15 +999,18 @@ static bool is_blacklisted(unsigned int cpu)
 
 	/*
 	 * Late loading on model 79 with microcode revision less than 0x0b000021
-	 * may result in a system hang. This behavior is documented in item
-	 * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
+	 * and LLC size per core bigger than 2.5MB may result in a system hang.
+	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
+	 * Processor E7-8800/4800 v4 Product Family).
 	 */
 	if (c->x86 == 6 &&
 	    c->x86_model == 79 &&
 	    c->x86_mask == 0x01 &&
+	    llc_size_per_core > 2621440 &&
 	    c->microcode < 0x0b000021) {
 		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
 		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
 		return true;
 	}
 
 	return false;
@@ -1067,6 +1073,15 @@ static struct microcode_ops microcode_intel_ops = {
 	.microcode_fini_cpu               = microcode_fini_cpu,
 };
 
+static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
+{
+	u64 llc_size = c->x86_cache_size * 1024;
+
+	do_div(llc_size, c->x86_max_cores);
+
+	return (int)llc_size;
+}
+
 struct microcode_ops * __init init_intel_microcode(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
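The 2621440 threshold used by is_blacklisted() above is exactly 2.5 MB in bytes, and calc_llc_size_per_core() feeds it: x86_cache_size is the LLC size in KB, so scaling by 1024 and dividing by x86_max_cores yields bytes of last-level cache per core. A standalone check of the arithmetic (SKU numbers are hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned long cache_kb = 61440;	/* hypothetical 60 MB LLC    */
	unsigned int cores = 4;		/* hypothetical low-core SKU */
	unsigned long llc_per_core = cache_kb * 1024UL / cores;

	printf("LLC/core = %lu bytes -> late loading %s\n", llc_per_core,
	       llc_per_core > 2621440UL ? "blocked" : "allowed");
	return 0;
}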
@@ -1077,6 +1092,8 @@ struct microcode_ops * __init init_intel_microcode(void)
 		return NULL;
 	}
 
+	llc_size_per_core = calc_llc_size_per_core(c);
+
 	return &microcode_intel_ops;
 }
 
@@ -93,6 +93,13 @@ static void delay_mwaitx(unsigned long __loops)
 {
 	u64 start, end, delay, loops = __loops;
 
+	/*
+	 * Timer value of 0 causes MWAITX to wait indefinitely, unless there
+	 * is a store on the memory monitored by MONITORX.
+	 */
+	if (loops == 0)
+		return;
+
 	start = rdtsc_ordered();
 
 	for (;;) {
@@ -345,7 +345,7 @@ void __init kaiser_init(void)
 	if (vsyscall_enabled())
 		kaiser_add_user_map_early((void *)VSYSCALL_ADDR,
 					  PAGE_SIZE,
-					  __PAGE_KERNEL_VSYSCALL);
+					  vsyscall_pgprot);
 
 	for_each_possible_cpu(cpu) {
 		void *percpu_vaddr = __per_cpu_user_mapped_start +
@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
 		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
 
 		pr->pblk = object.processor.pblk_address;
-
-		/*
-		 * We don't care about error returns - we just try to mark
-		 * these reserved so that nobody else is confused into thinking
-		 * that this region might be unused..
-		 *
-		 * (In particular, allocating the IO range for Cardbus)
-		 */
-		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
 	}
 
 	/*
@@ -593,25 +593,20 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
 void acpi_ns_terminate(void)
 {
 	acpi_status status;
+	union acpi_operand_object *prev;
+	union acpi_operand_object *next;
 
 	ACPI_FUNCTION_TRACE(ns_terminate);
 
-#ifdef ACPI_EXEC_APP
-	{
-		union acpi_operand_object *prev;
-		union acpi_operand_object *next;
-
-		/* Delete any module-level code blocks */
-
-		next = acpi_gbl_module_code_list;
-		while (next) {
-			prev = next;
-			next = next->method.mutex;
-			prev->method.mutex = NULL;	/* Clear the Mutex (cheated) field */
-			acpi_ut_remove_reference(prev);
-		}
-	}
-#endif
+	/* Delete any module-level code blocks */
+
+	next = acpi_gbl_module_code_list;
+	while (next) {
+		prev = next;
+		next = next->method.mutex;
+		prev->method.mutex = NULL;	/* Clear the Mutex (cheated) field */
+		acpi_ut_remove_reference(prev);
+	}
 
 	/*
 	 * Free the entire namespace -- all nodes and all objects
@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
 		return -ENODEV;
 
 	/*
-	 * If the device has a _HID (or _CID) returning a valid ACPI/PNP
-	 * device ID, it is better to make it look less attractive here, so that
-	 * the other device with the same _ADR value (that may not have a valid
-	 * device ID) can be matched going forward.  [This means a second spec
-	 * violation in a row, so whatever we do here is best effort anyway.]
+	 * If the device has a _HID returning a valid ACPI/PNP device ID, it is
+	 * better to make it look less attractive here, so that the other device
+	 * with the same _ADR value (that may not have a valid device ID) can be
+	 * matched going forward.  [This means a second spec violation in a row,
+	 * so whatever we do here is best effort anyway.]
 	 */
-	return sta_present && list_empty(&adev->pnp.ids) ?
+	return sta_present && !adev->pnp.type.platform_id ?
 		FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
 }
 
@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
 	if (!pr->flags.throttling)
 		return -ENODEV;
 
+	/*
+	 * We don't care about error returns - we just try to mark
+	 * these reserved so that nobody else is confused into thinking
+	 * that this region might be unused..
+	 *
+	 * (In particular, allocating the IO range for Cardbus)
+	 */
+	request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+
 	pr->throttling.state = 0;
 
 	duty_mask = pr->throttling.state_count - 1;
@@ -16,6 +16,7 @@
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
+#include <linux/acpi.h>
 #include <linux/bitops.h>
 #include <linux/cacheinfo.h>
 #include <linux/compiler.h>

@@ -104,9 +105,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 	struct cacheinfo *this_leaf, *sib_leaf;
 	unsigned int index;
-	int ret;
+	int ret = 0;
+
+	if (this_cpu_ci->cpu_map_populated)
+		return 0;
 
-	ret = cache_setup_of_node(cpu);
+	if (of_have_populated_dt())
+		ret = cache_setup_of_node(cpu);
+	else if (!acpi_disabled)
+		/* No cache property/hierarchy support yet in ACPI */
+		ret = -ENOTSUPP;
 	if (ret)
 		return ret;
 
@@ -203,8 +211,7 @@ static int detect_cache_attributes(unsigned int cpu)
 	 */
 	ret = cache_shared_cpu_map_setup(cpu);
 	if (ret) {
-		pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
-			cpu);
+		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
 		goto free_ci;
 	}
 	return 0;
@@ -166,14 +166,14 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
 }
 EXPORT_SYMBOL(generate_pm_trace);
 
-extern char __tracedata_start, __tracedata_end;
+extern char __tracedata_start[], __tracedata_end[];
 static int show_file_hash(unsigned int value)
 {
 	int match;
 	char *tracedata;
 
 	match = 0;
-	for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ;
+	for (tracedata = __tracedata_start ; tracedata < __tracedata_end ;
 	     tracedata += 2 + sizeof(unsigned long)) {
 		unsigned short lineno = *(unsigned short *)tracedata;
 		const char *file = *(const char **)(tracedata + 2);
@@ -383,6 +383,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
 	if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
 		psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
 		button_info = 0x33;
+	} else if (!button_info) {
+		psmouse_warn(psmouse, "got 0 in extended button data, assuming 3 buttons\n");
+		button_info = 0x33;
 	}
 
 	psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
@@ -584,6 +584,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
 {
 	struct sdhci_host *host;
 	struct device_node *np;
+	struct sdhci_pltfm_host *pltfm_host;
+	struct sdhci_esdhc *esdhc;
 	int ret;
 
 	np = pdev->dev.of_node;

@@ -600,6 +602,14 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
 
 	sdhci_get_of_property(pdev);
 
+	pltfm_host = sdhci_priv(host);
+	esdhc = pltfm_host->priv;
+	if (esdhc->vendor_ver == VENDOR_V_22)
+		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
+
+	if (esdhc->vendor_ver > VENDOR_V_22)
+		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
+
 	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
 	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
 	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
@@ -2205,19 +2205,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
 	void __iomem *ioaddr = tp->mmio_addr;
 	dma_addr_t paddr = tp->counters_phys_addr;
 	u32 cmd;
-	bool ret;
 
 	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
+	RTL_R32(CounterAddrHigh);
 	cmd = (u64)paddr & DMA_BIT_MASK(32);
 	RTL_W32(CounterAddrLow, cmd);
 	RTL_W32(CounterAddrLow, cmd | counter_cmd);
 
-	ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
-
-	RTL_W32(CounterAddrLow, 0);
-	RTL_W32(CounterAddrHigh, 0);
-
-	return ret;
+	return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
 }
 
 static bool rtl8169_reset_counters(struct net_device *dev)
@@ -860,6 +860,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
 	struct pppoe_hdr *ph;
 	struct net_device *dev;
 	char *start;
+	int hlen;
 
 	lock_sock(sk);
 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {

@@ -878,16 +879,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
 	if (total_len > (dev->mtu + dev->hard_header_len))
 		goto end;
 
-
-	skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
-			   0, GFP_KERNEL);
+	hlen = LL_RESERVED_SPACE(dev);
+	skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
+			   dev->needed_tailroom, 0, GFP_KERNEL);
 	if (!skb) {
 		error = -ENOMEM;
 		goto end;
 	}
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, dev->hard_header_len);
+	skb_reserve(skb, hlen);
 	skb_reset_network_header(skb);
 
 	skb->dev = dev;

@@ -948,7 +949,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
 	/* Copy the data if there is no space for the header or if it's
 	 * read-only.
 	 */
-	if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
+	if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
 		goto abort;
 
 	__skb_push(skb, sizeof(*ph));
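Both pppoe hunks above replace bare hard_header_len math with LL_RESERVED_SPACE(), which also accounts for the lower device's needed_headroom (a tunnel or VLAN device underneath PPPoE may require extra bytes in front of the Ethernet header). A sketch of the sizing difference, mirroring the kernel's rounding (device numbers are hypothetical):

#include <stdio.h>

#define HH_DATA_MOD 16
#define LL_RESERVED_SPACE(hh, nh) \
	((((hh) + (nh)) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

int main(void)
{
	unsigned int hard_header_len = 14;	/* Ethernet            */
	unsigned int needed_headroom = 40;	/* hypothetical tunnel */
	unsigned int ph = 6;			/* PPPoE header        */
	unsigned int total_len = 1492;

	printf("old alloc: %u bytes\n", total_len + hard_header_len + 32);
	printf("new alloc: %u bytes\n",
	       LL_RESERVED_SPACE(hard_header_len, needed_headroom) +
	       ph + total_len);
	return 0;
}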
@@ -1859,6 +1859,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
 		dev->rx_qlen = 4;
+		dev->tx_qlen = 4;
 	}
 
 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
@@ -1563,7 +1563,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
 					  rq->rx_ring[i].basePA);
 			rq->rx_ring[i].base = NULL;
 		}
-		rq->buf_info[i] = NULL;
 	}
 
 	if (rq->comp_ring.base) {

@@ -1578,6 +1577,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
 			(rq->rx_ring[0].size + rq->rx_ring[1].size);
 		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
 				  rq->buf_info_pa);
+		rq->buf_info[0] = rq->buf_info[1] = NULL;
 	}
 }
 
@@ -77,6 +77,16 @@ static void ls_pcie_fix_class(struct ls_pcie *pcie)
 	iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
 }
 
+/* Drop MSG TLP except for Vendor MSG */
+static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
+{
+	u32 val;
+
+	val = ioread32(pcie->dbi + PCIE_STRFMR1);
+	val &= 0xDFFFFFFF;
+	iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+}
+
 static int ls1021_pcie_link_up(struct pcie_port *pp)
 {
 	u32 state;

@@ -97,7 +107,7 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
 static void ls1021_pcie_host_init(struct pcie_port *pp)
 {
 	struct ls_pcie *pcie = to_ls_pcie(pp);
-	u32 val, index[2];
+	u32 index[2];
 
 	pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
 						     "fsl,pcie-scfg");

@@ -116,13 +126,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp)
 
 	dw_pcie_setup_rc(pp);
 
-	/*
-	 * LS1021A Workaround for internal TKT228622
-	 * to fix the INTx hang issue
-	 */
-	val = ioread32(pcie->dbi + PCIE_STRFMR1);
-	val &= 0xffff;
-	iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+	ls_pcie_drop_msg_tlp(pcie);
 }
 
 static int ls_pcie_link_up(struct pcie_port *pp)

@@ -147,6 +151,7 @@ static void ls_pcie_host_init(struct pcie_port *pp)
 	iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
 	ls_pcie_fix_class(pcie);
 	ls_pcie_clear_multifunction(pcie);
+	ls_pcie_drop_msg_tlp(pcie);
 	iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
 }
 

@@ -203,6 +208,7 @@ static const struct of_device_id ls_pcie_of_match[] = {
 	{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
 	{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
 	{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
+	{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
@@ -1727,7 +1727,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 
 	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
 		reason = FAILURE_SESSION_IN_RECOVERY;
-		sc->result = DID_REQUEUE;
+		sc->result = DID_REQUEUE << 16;
 		goto fault;
 	}
 
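The shift matters because the SCSI result word packs four byte-wide fields and the host byte lives at bits 16..23; an unshifted DID_REQUEUE (0x0d) lands in the status-byte field instead, so the midlayer never sees the requeue request. A standalone illustration using the same field layout:

#include <stdio.h>

#define DID_REQUEUE		0x0d
#define host_byte(result)	(((result) >> 16) & 0xff)

int main(void)
{
	unsigned int wrong = DID_REQUEUE;
	unsigned int right = DID_REQUEUE << 16;

	printf("host_byte(wrong) = 0x%02x, host_byte(right) = 0x%02x\n",
	       host_byte(wrong), host_byte(right));
	return 0;
}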
@@ -163,8 +163,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
 	 * step 1?
 	 */
 	if (ud->tcp_socket) {
-		dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
-			ud->tcp_socket);
+		dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd);
 		kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
 	}
 
@@ -338,23 +338,26 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
 	return priv;
 }
 
-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
+static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
 {
 	struct usb_device *udev = sdev->udev;
 	struct usb_host_endpoint *ep;
 	struct usb_endpoint_descriptor *epd = NULL;
+	int epnum = pdu->base.ep;
+	int dir = pdu->base.direction;
+
+	if (epnum < 0 || epnum > 15)
+		goto err_ret;
 
 	if (dir == USBIP_DIR_IN)
 		ep = udev->ep_in[epnum & 0x7f];
 	else
 		ep = udev->ep_out[epnum & 0x7f];
-	if (!ep) {
-		dev_err(&sdev->interface->dev, "no such endpoint?, %d\n",
-			epnum);
-		BUG();
-	}
+	if (!ep)
+		goto err_ret;
 
 	epd = &ep->desc;
 
 	if (usb_endpoint_xfer_control(epd)) {
 		if (dir == USBIP_DIR_OUT)
 			return usb_sndctrlpipe(udev, epnum);

@@ -377,15 +380,37 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
 	}
 
 	if (usb_endpoint_xfer_isoc(epd)) {
+		/* validate packet size and number of packets */
+		unsigned int maxp, packets, bytes;
+
+#define USB_EP_MAXP_MULT_SHIFT	11
+#define USB_EP_MAXP_MULT_MASK	(3 << USB_EP_MAXP_MULT_SHIFT)
+#define USB_EP_MAXP_MULT(m) \
+	(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
+
+		maxp = usb_endpoint_maxp(epd);
+		maxp *= (USB_EP_MAXP_MULT(
+				__le16_to_cpu(epd->wMaxPacketSize)) + 1);
+		bytes = pdu->u.cmd_submit.transfer_buffer_length;
+		packets = DIV_ROUND_UP(bytes, maxp);
+
+		if (pdu->u.cmd_submit.number_of_packets < 0 ||
+		    pdu->u.cmd_submit.number_of_packets > packets) {
+			dev_err(&sdev->udev->dev,
+				"CMD_SUBMIT: isoc invalid num packets %d\n",
+				pdu->u.cmd_submit.number_of_packets);
+			return -1;
+		}
 		if (dir == USBIP_DIR_OUT)
 			return usb_sndisocpipe(udev, epnum);
 		else
 			return usb_rcvisocpipe(udev, epnum);
 	}
 
+err_ret:
-	/* NOT REACHED */
-	dev_err(&sdev->interface->dev, "get pipe, epnum %d\n", epnum);
-	return 0;
+	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
+	return -1;
 }
 
 static void masking_bogus_flags(struct urb *urb)
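The bound computed above caps number_of_packets by what the endpoint can physically carry: bits 10:0 of wMaxPacketSize give the base packet size and bits 12:11 the additional transactions per microframe. A standalone sketch of the same arithmetic (endpoint values are hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned short wMaxPacketSize = (2 << 11) | 1024;  /* 3 x 1024 */
	unsigned int maxp = (wMaxPacketSize & 0x7ff) *
			    (((wMaxPacketSize >> 11) & 3) + 1);
	unsigned int bytes = 98304;		/* transfer_buffer_length */
	unsigned int packets = (bytes + maxp - 1) / maxp; /* DIV_ROUND_UP */

	printf("maxp = %u, cap on number_of_packets = %u\n", maxp, packets);
	return 0;
}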
@@ -449,7 +474,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
 	struct stub_priv *priv;
 	struct usbip_device *ud = &sdev->ud;
 	struct usb_device *udev = sdev->udev;
-	int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
+	int pipe = get_pipe(sdev, pdu);
+
+	if (pipe == -1)
+		return;
 
 	priv = stub_priv_alloc(sdev, pdu);
 	if (!priv)
@@ -317,18 +317,14 @@ int usbip_recv(struct socket *sock, void *buf, int size)
 	struct msghdr msg;
 	struct kvec iov;
 	int total = 0;
-
 	/* for blocks of if (usbip_dbg_flag_xmit) */
 	char *bp = buf;
 	int osize = size;
 
-	usbip_dbg_xmit("enter\n");
-
-	if (!sock || !buf || !size) {
-		pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
-		       size);
+	if (!sock || !buf || !size)
 		return -EINVAL;
-	}
+
+	usbip_dbg_xmit("enter\n");
 
 	do {
 		sock->sk->sk_allocation = GFP_NOIO;

@@ -341,11 +337,8 @@ int usbip_recv(struct socket *sock, void *buf, int size)
 		msg.msg_flags      = MSG_NOSIGNAL;
 
 		result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
-		if (result <= 0) {
-			pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
-				 sock, buf, size, result, total);
+		if (result <= 0)
 			goto err;
-		}
 
 		size -= result;
 		buf += result;
@@ -261,6 +261,7 @@ struct usbip_device {
 	/* lock for status */
 	spinlock_t lock;
 
+	int sockfd;
 	struct socket *tcp_socket;
 
 	struct task_struct *tcp_rx;
@@ -117,11 +117,12 @@ EXPORT_SYMBOL_GPL(usbip_event_add);
 int usbip_event_happened(struct usbip_device *ud)
 {
 	int happened = 0;
+	unsigned long flags;
 
-	spin_lock(&ud->lock);
+	spin_lock_irqsave(&ud->lock, flags);
 	if (ud->event != 0)
 		happened = 1;
-	spin_unlock(&ud->lock);
+	spin_unlock_irqrestore(&ud->lock, flags);
 
 	return happened;
 }
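This is the first of many spin_lock -> spin_lock_irqsave conversions in the usbip hunks that follow: the same locks are taken from URB giveback paths that can run in interrupt context, so lockdep flags a possible self-deadlock if an interrupt fires while the lock is held in process context. A rough userspace analogue of the _irqsave idea, using signal blocking to model "disable local interrupts and remember the previous state" (illustrative only):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void lock_irqsave(sigset_t *flags)
{
	sigset_t block;

	sigfillset(&block);
	sigprocmask(SIG_BLOCK, &block, flags);	/* "local_irq_save" */
	pthread_mutex_lock(&lock);
}

static void unlock_irqrestore(const sigset_t *flags)
{
	pthread_mutex_unlock(&lock);
	sigprocmask(SIG_SETMASK, flags, NULL);	/* restore old state */
}

int main(void)
{
	sigset_t flags;

	lock_irqsave(&flags);
	puts("critical section: signal-driven reentry is now impossible");
	unlock_irqrestore(&flags);
	return 0;
}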
@@ -121,9 +121,11 @@ static void dump_port_status_diff(u32 prev_status, u32 new_status)
 
 void rh_port_connect(int rhport, enum usb_device_speed speed)
 {
+	unsigned long flags;
+
 	usbip_dbg_vhci_rh("rh_port_connect %d\n", rhport);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	the_controller->port_status[rhport] |= USB_PORT_STAT_CONNECTION
 		| (1 << USB_PORT_FEAT_C_CONNECTION);

@@ -139,22 +141,24 @@ void rh_port_connect(int rhport, enum usb_device_speed speed)
 		break;
 	}
 
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
 }
 
 static void rh_port_disconnect(int rhport)
 {
+	unsigned long flags;
+
 	usbip_dbg_vhci_rh("rh_port_disconnect %d\n", rhport);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	the_controller->port_status[rhport] &= ~USB_PORT_STAT_CONNECTION;
 	the_controller->port_status[rhport] |=
 		(1 << USB_PORT_FEAT_C_CONNECTION);
 
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 	usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
 }
 

@@ -182,13 +186,14 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
 	int		retval;
 	int		rhport;
 	int		changed = 0;
+	unsigned long	flags;
 
 	retval = DIV_ROUND_UP(VHCI_NPORTS + 1, 8);
 	memset(buf, 0, retval);
 
 	vhci = hcd_to_vhci(hcd);
 
-	spin_lock(&vhci->lock);
+	spin_lock_irqsave(&vhci->lock, flags);
 	if (!HCD_HW_ACCESSIBLE(hcd)) {
 		usbip_dbg_vhci_rh("hw accessible flag not on?\n");
 		goto done;

@@ -209,7 +214,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
 		usb_hcd_resume_root_hub(hcd);
 
 done:
-	spin_unlock(&vhci->lock);
+	spin_unlock_irqrestore(&vhci->lock, flags);
 	return changed ? retval : 0;
 }
 

@@ -236,6 +241,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 	struct vhci_hcd	*dum;
 	int             retval = 0;
 	int		rhport;
+	unsigned long	flags;
 
 	u32 prev_port_status[VHCI_NPORTS];
 

@@ -254,7 +260,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 
 	dum = hcd_to_vhci(hcd);
 
-	spin_lock(&dum->lock);
+	spin_lock_irqsave(&dum->lock, flags);
 
 	/* store old status and compare now and old later */
 	if (usbip_dbg_flag_vhci_rh) {

@@ -408,7 +414,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 	}
 	usbip_dbg_vhci_rh(" bye\n");
 
-	spin_unlock(&dum->lock);
+	spin_unlock_irqrestore(&dum->lock, flags);
 
 	return retval;
 }
@@ -431,6 +437,7 @@ static void vhci_tx_urb(struct urb *urb)
 {
 	struct vhci_device *vdev = get_vdev(urb->dev);
 	struct vhci_priv *priv;
+	unsigned long flags;
 
 	if (!vdev) {
 		pr_err("could not get virtual device");

@@ -443,7 +450,7 @@ static void vhci_tx_urb(struct urb *urb)
 		return;
 	}
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 
 	priv->seqnum = atomic_inc_return(&the_controller->seqnum);
 	if (priv->seqnum == 0xffff)

@@ -457,7 +464,7 @@ static void vhci_tx_urb(struct urb *urb)
 	list_add_tail(&priv->list, &vdev->priv_tx);
 
 	wake_up(&vdev->waitq_tx);
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 }
 
 static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,

@@ -466,15 +473,16 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 	struct device *dev = &urb->dev->dev;
 	int ret = 0;
 	struct vhci_device *vdev;
+	unsigned long flags;
 
 	/* patch to usb_sg_init() is in 2.5.60 */
 	BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	if (urb->status != -EINPROGRESS) {
 		dev_err(dev, "URB already unlinked!, status %d\n", urb->status);
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 		return urb->status;
 	}
 

@@ -486,7 +494,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 	    vdev->ud.status == VDEV_ST_ERROR) {
 		dev_err(dev, "enqueue for inactive port %d\n", vdev->rhport);
 		spin_unlock(&vdev->ud.lock);
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 		return -ENODEV;
 	}
 	spin_unlock(&vdev->ud.lock);

@@ -559,14 +567,14 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 
 out:
 	vhci_tx_urb(urb);
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	return 0;
 
 no_need_xmit:
 	usb_hcd_unlink_urb_from_ep(hcd, urb);
 no_need_unlink:
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 	if (!ret)
 		usb_hcd_giveback_urb(vhci_to_hcd(the_controller),
 				     urb, urb->status);
@@ -623,14 +631,15 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
 	struct vhci_priv *priv;
 	struct vhci_device *vdev;
+	unsigned long flags;
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	priv = urb->hcpriv;
 	if (!priv) {
 		/* URB was never linked! or will be soon given back by
 		 * vhci_rx. */
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 		return -EIDRM;
 	}
 

@@ -639,7 +648,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 
 		ret = usb_hcd_check_unlink_urb(hcd, urb, status);
 		if (ret) {
-			spin_unlock(&the_controller->lock);
+			spin_unlock_irqrestore(&the_controller->lock, flags);
 			return ret;
 		}
 	}

@@ -664,10 +673,10 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		 */
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
 
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 		usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
 				     urb->status);
-		spin_lock(&the_controller->lock);
+		spin_lock_irqsave(&the_controller->lock, flags);
 
 	} else {
 		/* tcp connection is alive */

@@ -679,7 +688,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		unlink = kzalloc(sizeof(struct vhci_unlink), GFP_ATOMIC);
 		if (!unlink) {
 			spin_unlock(&vdev->priv_lock);
-			spin_unlock(&the_controller->lock);
+			spin_unlock_irqrestore(&the_controller->lock, flags);
 			usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC);
 			return -ENOMEM;
 		}

@@ -698,7 +707,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		spin_unlock(&vdev->priv_lock);
 	}
 
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	usbip_dbg_vhci_hc("leave\n");
 	return 0;
@@ -707,8 +716,9 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
 {
 	struct vhci_unlink *unlink, *tmp;
+	unsigned long flags;
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 	spin_lock(&vdev->priv_lock);
 
 	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {

@@ -742,19 +752,19 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
 		list_del(&unlink->list);
 
 		spin_unlock(&vdev->priv_lock);
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 
 		usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
 				     urb->status);
 
-		spin_lock(&the_controller->lock);
+		spin_lock_irqsave(&the_controller->lock, flags);
 		spin_lock(&vdev->priv_lock);
 
 		kfree(unlink);
 	}
 
 	spin_unlock(&vdev->priv_lock);
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 }
 
 /*
@@ -768,7 +778,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
 
 	/* need this? see stub_dev.c */
 	if (ud->tcp_socket) {
-		pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
+		pr_debug("shutdown sockfd %d\n", ud->sockfd);
 		kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
 	}
 

@@ -821,8 +831,9 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
 static void vhci_device_reset(struct usbip_device *ud)
 {
 	struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
+	unsigned long flags;
 
-	spin_lock(&ud->lock);
+	spin_lock_irqsave(&ud->lock, flags);
 
 	vdev->speed  = 0;
 	vdev->devid  = 0;

@@ -836,14 +847,16 @@ static void vhci_device_reset(struct usbip_device *ud)
 	}
 	ud->status = VDEV_ST_NULL;
 
-	spin_unlock(&ud->lock);
+	spin_unlock_irqrestore(&ud->lock, flags);
 }
 
 static void vhci_device_unusable(struct usbip_device *ud)
 {
-	spin_lock(&ud->lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ud->lock, flags);
 	ud->status = VDEV_ST_ERROR;
-	spin_unlock(&ud->lock);
+	spin_unlock_irqrestore(&ud->lock, flags);
 }
 
 static void vhci_device_init(struct vhci_device *vdev)
@@ -933,12 +946,13 @@ static int vhci_get_frame_number(struct usb_hcd *hcd)
 static int vhci_bus_suspend(struct usb_hcd *hcd)
 {
 	struct vhci_hcd *vhci = hcd_to_vhci(hcd);
+	unsigned long flags;
 
 	dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
 
-	spin_lock(&vhci->lock);
+	spin_lock_irqsave(&vhci->lock, flags);
 	hcd->state = HC_STATE_SUSPENDED;
-	spin_unlock(&vhci->lock);
+	spin_unlock_irqrestore(&vhci->lock, flags);
 
 	return 0;
 }

@@ -947,15 +961,16 @@ static int vhci_bus_resume(struct usb_hcd *hcd)
 {
 	struct vhci_hcd *vhci = hcd_to_vhci(hcd);
 	int rc = 0;
+	unsigned long flags;
 
 	dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
 
-	spin_lock(&vhci->lock);
+	spin_lock_irqsave(&vhci->lock, flags);
 	if (!HCD_HW_ACCESSIBLE(hcd))
 		rc = -ESHUTDOWN;
 	else
 		hcd->state = HC_STATE_RUNNING;
-	spin_unlock(&vhci->lock);
+	spin_unlock_irqrestore(&vhci->lock, flags);
 
 	return rc;
 }

@@ -1053,17 +1068,18 @@ static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
 	int rhport = 0;
 	int connected = 0;
 	int ret = 0;
+	unsigned long flags;
 
 	hcd = platform_get_drvdata(pdev);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	for (rhport = 0; rhport < VHCI_NPORTS; rhport++)
 		if (the_controller->port_status[rhport] &
 		    USB_PORT_STAT_CONNECTION)
 			connected += 1;
 
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	if (connected > 0) {
 		dev_info(&pdev->dev,
@@ -71,10 +71,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 {
 	struct usbip_device *ud = &vdev->ud;
 	struct urb *urb;
+	unsigned long flags;
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 	urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	if (!urb) {
 		pr_err("cannot find a urb of seqnum %u max seqnum %d\n",

@@ -103,9 +104,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 
 	usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 	usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
 

@@ -116,8 +117,9 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
 						  struct usbip_header *pdu)
 {
 	struct vhci_unlink *unlink, *tmp;
+	unsigned long flags;
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 
 	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
 		pr_info("unlink->seqnum %lu\n", unlink->seqnum);

@@ -126,12 +128,12 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
 				  unlink->seqnum);
 			list_del(&unlink->list);
 
-			spin_unlock(&vdev->priv_lock);
+			spin_unlock_irqrestore(&vdev->priv_lock, flags);
 			return unlink;
 		}
 	}
 
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	return NULL;
 }

@@ -141,6 +143,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 {
 	struct vhci_unlink *unlink;
 	struct urb *urb;
+	unsigned long flags;
 
 	usbip_dump_header(pdu);
 

@@ -151,9 +154,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 		return;
 	}
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 	urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	if (!urb) {
 		/*

@@ -170,9 +173,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 		urb->status = pdu->u.ret_unlink.status;
 		pr_info("urb->status %d\n", urb->status);
 
-		spin_lock(&the_controller->lock);
+		spin_lock_irqsave(&the_controller->lock, flags);
 		usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 
 		usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
 				     urb->status);

@@ -184,10 +187,11 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 static int vhci_priv_tx_empty(struct vhci_device *vdev)
 {
 	int empty = 0;
+	unsigned long flags;
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 	empty = list_empty(&vdev->priv_rx);
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	return empty;
 }
@@ -32,23 +32,28 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
 {
 	char *s = out;
 	int i = 0;
+	unsigned long flags;
 
 	BUG_ON(!the_controller || !out);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	/*
 	 * output example:
-	 * prt sta spd dev socket           local_busid
-	 * 000 004 000 000         c5a7bb80 1-2.3
-	 * 001 004 000 000         d8cee980 2-3.4
+	 * port sta spd dev      sockfd local_busid
+	 * 0000 004 000 00000000 000003 1-2.3
+	 * 0001 004 000 00000000 000004 2-3.4
 	 *
-	 * IP address can be retrieved from a socket pointer address by looking
-	 * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
-	 * port number and its peer IP address.
+	 * Output includes socket fd instead of socket pointer address to
+	 * avoid leaking kernel memory address in:
+	 *	/sys/devices/platform/vhci_hcd.0/status and in debug output.
+	 * The socket pointer address is not used at the moment and it was
+	 * made visible as a convenient way to find IP address from socket
+	 * pointer address by looking up /proc/net/{tcp,tcp6}. As this opens
+	 * a security hole, the change is made to use sockfd instead.
 	 */
 	out += sprintf(out,
-		       "prt sta spd bus dev socket           local_busid\n");
+		       "prt sta spd bus dev sockfd local_busid\n");
 
 	for (i = 0; i < VHCI_NPORTS; i++) {
 		struct vhci_device *vdev = port_to_vdev(i);
@@ -60,17 +65,17 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
 			out += sprintf(out, "%03u %08x ",
 				       vdev->speed, vdev->devid);
-			out += sprintf(out, "%16p ", vdev->ud.tcp_socket);
+			out += sprintf(out, "%06u", vdev->ud.sockfd);
 			out += sprintf(out, "%s", dev_name(&vdev->udev->dev));
 
-		} else {
-			out += sprintf(out, "000 000 000 0000000000000000 0-0");
-		}
+		} else
+			out += sprintf(out, "000 000 000 000000 0-0");
 
 		out += sprintf(out, "\n");
 		spin_unlock(&vdev->ud.lock);
 	}
 
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	return out - s;
 }
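The sysfs format change closes an information leak: "%p" printed a live kernel heap address to any reader of the status file, while the fd index carries what usbip tooling actually needs. A userspace illustration of the two columns (the object is a stand-in for the kernel socket):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *obj = malloc(64);		/* stand-in for struct socket */
	unsigned int sockfd = 3;	/* stand-in fd index          */

	printf("leaky: %16p\n", obj);
	printf("safe:  %06u\n", sockfd);
	free(obj);
	return 0;
}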
@@ -80,11 +85,12 @@ static DEVICE_ATTR_RO(status);
 static int vhci_port_disconnect(__u32 rhport)
 {
 	struct vhci_device *vdev;
+	unsigned long flags;
 
 	usbip_dbg_vhci_sysfs("enter\n");
 
 	/* lock */
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	vdev = port_to_vdev(rhport);
 

@@ -94,14 +100,14 @@ static int vhci_port_disconnect(__u32 rhport)
 
 		/* unlock */
 		spin_unlock(&vdev->ud.lock);
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 
 		return -EINVAL;
 	}
 
 	/* unlock */
 	spin_unlock(&vdev->ud.lock);
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
 
@@ -177,6 +183,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
 	int sockfd = 0;
 	__u32 rhport = 0, devid = 0, speed = 0;
 	int err;
+	unsigned long flags;
 
 	/*
 	 * @rhport: port number of vhci_hcd

@@ -202,14 +209,14 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
 	/* now need lock until setting vdev status as used */
 
 	/* begin a lock */
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 	vdev = port_to_vdev(rhport);
 	spin_lock(&vdev->ud.lock);
 
 	if (vdev->ud.status != VDEV_ST_NULL) {
 		/* end of the lock */
 		spin_unlock(&vdev->ud.lock);
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 
 		sockfd_put(socket);
 

@@ -223,11 +230,12 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
 
 	vdev->devid         = devid;
 	vdev->speed         = speed;
+	vdev->ud.sockfd     = sockfd;
 	vdev->ud.tcp_socket = socket;
 	vdev->ud.status     = VDEV_ST_NOTASSIGNED;
 
 	spin_unlock(&vdev->ud.lock);
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 	/* end the lock */
 
 	vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
@@ -47,16 +47,17 @@ static void setup_cmd_submit_pdu(struct usbip_header *pdup, struct urb *urb)
 static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev)
 {
 	struct vhci_priv *priv, *tmp;
+	unsigned long flags;
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 
 	list_for_each_entry_safe(priv, tmp, &vdev->priv_tx, list) {
 		list_move_tail(&priv->list, &vdev->priv_rx);
-		spin_unlock(&vdev->priv_lock);
+		spin_unlock_irqrestore(&vdev->priv_lock, flags);
 		return priv;
 	}
 
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	return NULL;
 }

@@ -137,16 +138,17 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
 static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev)
 {
 	struct vhci_unlink *unlink, *tmp;
+	unsigned long flags;
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 
 	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
 		list_move_tail(&unlink->list, &vdev->unlink_rx);
-		spin_unlock(&vdev->priv_lock);
+		spin_unlock_irqrestore(&vdev->priv_lock, flags);
 		return unlink;
 	}
 
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	return NULL;
 }
@ -178,11 +178,8 @@ ext2_get_acl(struct inode *inode, int type)
|
|||
return acl;
|
||||
}
|
||||
|
||||
/*
|
||||
* inode->i_mutex: down
|
||||
*/
|
||||
int
|
||||
ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
|
||||
static int
|
||||
__ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
|
||||
{
|
||||
int name_index;
|
||||
void *value = NULL;
|
||||
|
@ -192,13 +189,6 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
|
|||
switch(type) {
|
||||
case ACL_TYPE_ACCESS:
|
||||
name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
|
||||
if (acl) {
|
||||
error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
|
||||
if (error)
|
||||
return error;
|
||||
inode->i_ctime = CURRENT_TIME_SEC;
|
||||
mark_inode_dirty(inode);
|
||||
}
|
||||
break;
|
||||
|
||||
case ACL_TYPE_DEFAULT:
|
||||
|
@ -224,6 +214,24 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
|
|||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* inode->i_mutex: down
|
||||
*/
|
||||
int
|
||||
ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
|
||||
{
|
||||
int error;
|
||||
|
||||
if (type == ACL_TYPE_ACCESS && acl) {
|
||||
error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
|
||||
if (error)
|
||||
return error;
|
||||
inode->i_ctime = CURRENT_TIME_SEC;
|
||||
mark_inode_dirty(inode);
|
||||
}
|
||||
return __ext2_set_acl(inode, acl, type);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the ACLs of a new inode. Called from ext2_new_inode.
|
||||
*
|
||||
|
@ -241,12 +249,12 @@ ext2_init_acl(struct inode *inode, struct inode *dir)
|
|||
return error;
|
||||
|
||||
if (default_acl) {
|
||||
error = ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
|
||||
error = __ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
|
||||
posix_acl_release(default_acl);
|
||||
}
|
||||
if (acl) {
|
||||
if (!error)
|
||||
error = ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
|
||||
error = __ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
|
||||
posix_acl_release(acl);
|
||||
}
|
||||
return error;
|
||||
|
|
|
@ -113,6 +113,10 @@ void f_setown(struct file *filp, unsigned long arg, int force)
|
|||
int who = arg;
|
||||
type = PIDTYPE_PID;
|
||||
if (who < 0) {
|
||||
/* avoid overflow below */
|
||||
if (who == INT_MIN)
|
||||
return;
|
||||
|
||||
type = PIDTYPE_PGID;
|
||||
who = -who;
|
||||
}
|
||||
|
|
|
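The f_setown() hunk above guards against who == INT_MIN because negating it is undefined behaviour: the positive counterpart of INT_MIN does not fit in an int. A small userspace demonstration of the same guard (illustrative only):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int who = -5;

	/*
	 * Guard first: -INT_MIN cannot be represented, so negating it
	 * is undefined behaviour (UBSan reports "negation of
	 * -2147483648 cannot be represented"). Mirror the kernel's
	 * early return for that one value.
	 */
	if (who == INT_MIN)
		return 1;

	who = -who;		/* safe for every other negative value */
	printf("%d\n", who);	/* prints 5 */
	return 0;
}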
@@ -60,9 +60,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
		else
			GROUP_AT(gi, i) = GROUP_AT(rqgi, i);

		/* Each thread allocates its own gi, no race */
		groups_sort(gi);
	}

	/* Each thread allocates its own gi, no race */
	groups_sort(gi);
} else {
	gi = get_group_info(rqgi);
}
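The gid-sorting fix above matters because the kernel's group-membership check does a binary search over the supplementary gid array, which silently fails on an unsorted group_info. An illustrative binary search in that style (demo_groups_search() is a hypothetical name, not the kernel's exact code):

/*
 * Returns nonsense unless gids[] is sorted ascending first, which is
 * why the hunk above sorts the freshly built gi before it is used.
 */
static int demo_groups_search(const unsigned int *gids, int count,
			      unsigned int grp)
{
	int left = 0, right = count;

	while (left < right) {
		int mid = (left + right) / 2;

		if (grp > gids[mid])
			left = mid + 1;
		else if (grp < gids[mid])
			right = mid;
		else
			return 1;	/* member */
	}
	return 0;			/* not a member */
}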
@@ -513,9 +513,17 @@ static void __discard_prealloc(struct reiserfs_transaction_handle *th,
		       "inode has negative prealloc blocks count.");
#endif
	while (ei->i_prealloc_count > 0) {
		reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
		ei->i_prealloc_block++;
		b_blocknr_t block_to_free;

		/*
		 * reiserfs_free_prealloc_block can drop the write lock,
		 * which could allow another caller to free the same block.
		 * We can protect against it by modifying the prealloc
		 * state before calling it.
		 */
		block_to_free = ei->i_prealloc_block++;
		ei->i_prealloc_count--;
		reiserfs_free_prealloc_block(th, inode, block_to_free);
		dirty = 1;
	}
	if (dirty)

@@ -1128,7 +1136,7 @@ static int determine_prealloc_size(reiserfs_blocknr_hint_t *hint)
	hint->prealloc_size = 0;

	if (!hint->formatted_node && hint->preallocate) {
		if (S_ISREG(hint->inode->i_mode)
		if (S_ISREG(hint->inode->i_mode) && !IS_PRIVATE(hint->inode)
		    && hint->inode->i_size >=
		    REISERFS_SB(hint->th->t_super)->s_alloc_options.
		    preallocmin * hint->inode->i_sb->s_blocksize)
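The race fix above is an instance of a general rule: consume shared state before calling a helper that may temporarily drop the lock. A minimal sketch of that rule under the assumption of one lock and one shared cursor; struct demo_state and demo_free_item() are hypothetical stand-ins:

struct demo_state {
	unsigned long next;	/* next preallocated item */
	unsigned long count;	/* items still reserved */
};

void demo_free_item(struct demo_state *s, unsigned long item); /* may drop the lock */

static void demo_discard(struct demo_state *s)
{
	while (s->count > 0) {
		unsigned long item = s->next++;	/* claim first ... */

		s->count--;
		demo_free_item(s, item);	/* ... then call the helper
						 * that may drop the lock, so
						 * no second caller can free
						 * the same item. */
	}
}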
@@ -37,7 +37,14 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
	error = journal_begin(&th, inode->i_sb, jcreate_blocks);
	reiserfs_write_unlock(inode->i_sb);
	if (error == 0) {
		if (type == ACL_TYPE_ACCESS && acl) {
			error = posix_acl_update_mode(inode, &inode->i_mode,
						      &acl);
			if (error)
				goto unlock;
		}
		error = __reiserfs_set_acl(&th, inode, type, acl);
unlock:
		reiserfs_write_lock(inode->i_sb);
		error2 = journal_end(&th);
		reiserfs_write_unlock(inode->i_sb);

@@ -245,11 +252,6 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
	switch (type) {
	case ACL_TYPE_ACCESS:
		name = POSIX_ACL_XATTR_ACCESS;
		if (acl) {
			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
			if (error)
				return error;
		}
		break;
	case ACL_TYPE_DEFAULT:
		name = POSIX_ACL_XATTR_DEFAULT;

@@ -625,6 +625,8 @@ static int sdcardfs_permission(struct vfsmount *mnt, struct inode *inode, int ma
	struct inode tmp;
	struct sdcardfs_inode_data *top = top_data_get(SDCARDFS_I(inode));

	if (IS_ERR(mnt))
		return PTR_ERR(mnt);
	if (!top)
		return -EINVAL;

fs/select.c

@@ -29,6 +29,7 @@
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

@@ -550,7 +551,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	size_t size, alloc_size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

@@ -577,7 +578,14 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (size > (SIZE_MAX / 6))
			goto out_nofds;

		alloc_size = 6 * size;
		bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
		if (!bits && alloc_size > PAGE_SIZE)
			bits = vmalloc(alloc_size);

		if (!bits)
			goto out_nofds;
	}

@@ -614,7 +622,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,

out:
	if (bits != stack_fds)
		kfree(bits);
		kvfree(bits);
out_nofds:
	return ret;
}
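The select(2) hunks above add a vmalloc fallback so a large fd count no longer fails on a physically contiguous allocation. The general shape of that pattern, as a minimal kernel-style sketch (demo_alloc() is an illustrative name):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *demo_alloc(size_t size)
{
	void *buf;

	if (size > (SIZE_MAX / 6))	/* overflow check, as in the hunk */
		return NULL;

	/* Try contiguous memory quietly first ... */
	buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	/* ... and fall back to vmalloc() for large buffers. */
	if (!buf && size > PAGE_SIZE)
		buf = vmalloc(size);

	return buf;	/* release with kvfree(), which picks the right path */
}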
@@ -70,7 +70,7 @@ xattr_permission(struct inode *inode, const char *name, int mask)
			return -EPERM;
	}

	return inode_permission(inode, mask);
	return inode_permission2(ERR_PTR(-EOPNOTSUPP), inode, mask);
}

/**

@@ -71,6 +71,7 @@ struct cpu_cacheinfo {
	struct cacheinfo *info_list;
	unsigned int num_levels;
	unsigned int num_leaves;
	bool cpu_map_populated;
};

/*

@@ -63,6 +63,13 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
#define ktime_add(lhs, rhs) \
	({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })

/*
 * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
 * this means that you must check the result for overflow yourself.
 */
#define ktime_add_unsafe(lhs, rhs) \
	({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })

/*
 * Add a ktime_t variable and a scalar nanosecond value.
 * res = kt + nsval:
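The ktime_add_unsafe() macro above works because performing the addition in u64 wraps in a defined way, whereas signed 64-bit overflow is undefined behaviour. A small userspace demonstration of the same idea (illustrative only; the conversion of the wrapped u64 back to a signed type is the same implementation-defined two's-complement step the kernel relies on):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t lhs = INT64_MAX, rhs = 1;

	/* lhs + rhs directly would be signed overflow (UB); do it in
	 * unsigned arithmetic and let the caller check, the way
	 * ktime_add_safe() clamps to KTIME_SEC_MAX. */
	int64_t sum = (int64_t)((uint64_t)lhs + (uint64_t)rhs);

	if (lhs > 0 && rhs > 0 && sum < 0)
		puts("overflow detected, clamp to max");
	return 0;
}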
@@ -243,6 +243,10 @@ int xt_check_entry_offsets(const void *base, const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset);

unsigned int *xt_alloc_entry_offsets(unsigned int size);
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size);

int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
		   bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,

@@ -377,16 +381,16 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
 * allows us to return 0 for single core systems without forcing
 * callers to deal with SMP vs. NONSMP issues.
 */
static inline u64 xt_percpu_counter_alloc(void)
static inline unsigned long xt_percpu_counter_alloc(void)
{
	if (nr_cpu_ids > 1) {
		void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
						    sizeof(struct xt_counters));

		if (res == NULL)
			return (u64) -ENOMEM;
			return -ENOMEM;

		return (u64) (__force unsigned long) res;
		return (__force unsigned long) res;
	}

	return 0;

@@ -1539,6 +1539,7 @@ struct sched_dl_entity {
	u64 dl_deadline;	/* relative deadline of each instance */
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline */
	u64 dl_density;		/* dl_runtime / dl_deadline */

	/*
	 * Actual scheduling parameters. Initialized with the values above,

@@ -29,9 +29,14 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
	return (struct tcphdr *)skb_transport_header(skb);
}

static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
{
	return th->doff * 4;
}

static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
	return tcp_hdr(skb)->doff * 4;
	return __tcp_hdrlen(tcp_hdr(skb));
}

static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
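The __tcp_hdrlen() helper above multiplies the 4-bit doff field by 4 because doff counts 32-bit words. A worked example with a stand-in struct (demo_tcphdr and demo_tcp_hdrlen are illustrative names):

#include <assert.h>

struct demo_tcphdr {		/* stand-in for struct tcphdr's doff field */
	unsigned int doff:4;
};

static unsigned int demo_tcp_hdrlen(const struct demo_tcphdr *th)
{
	return th->doff * 4;	/* doff counts 32-bit words */
}

int main(void)
{
	struct demo_tcphdr th = { .doff = 5 };

	assert(demo_tcp_hdrlen(&th) == 20);	/* minimal header, no options */
	th.doff = 15;
	assert(demo_tcp_hdrlen(&th) == 60);	/* maximal header with options */
	return 0;
}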
@@ -24,16 +24,10 @@
#ifndef MODULE_ARCH_VERMAGIC
#define MODULE_ARCH_VERMAGIC ""
#endif
#ifdef RETPOLINE
#define MODULE_VERMAGIC_RETPOLINE "retpoline "
#else
#define MODULE_VERMAGIC_RETPOLINE ""
#endif

#define VERMAGIC_STRING						\
	UTS_RELEASE " "						\
	MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT		\
	MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
	MODULE_ARCH_VERMAGIC \
	MODULE_VERMAGIC_RETPOLINE
	MODULE_ARCH_VERMAGIC

@@ -19,6 +19,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32

static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
		key = INADDR_ANY;

	return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
}

@@ -281,6 +281,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);
bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);

static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{

@@ -209,6 +209,11 @@ int net_eq(const struct net *net1, const struct net *net2)
	return net1 == net2;
}

static inline int check_net(const struct net *net)
{
	return atomic_read(&net->count) != 0;
}

void net_drop_ns(void *);

#else

@@ -233,6 +238,11 @@ int net_eq(const struct net *net1, const struct net *net2)
	return 1;
}

static inline int check_net(const struct net *net)
{
	return 1;
}

#define net_drop_ns NULL
#endif

@@ -26,6 +26,19 @@
#define EPOLL_CTL_DEL 2
#define EPOLL_CTL_MOD 3

/* Epoll event masks */
#define EPOLLIN		0x00000001
#define EPOLLPRI	0x00000002
#define EPOLLOUT	0x00000004
#define EPOLLERR	0x00000008
#define EPOLLHUP	0x00000010
#define EPOLLRDNORM	0x00000040
#define EPOLLRDBAND	0x00000080
#define EPOLLWRNORM	0x00000100
#define EPOLLWRBAND	0x00000200
#define EPOLLMSG	0x00000400
#define EPOLLRDHUP	0x00002000

/*
 * Request the handling of system wakeup events so as to prevent system suspends
 * from happening while those events are being processed.
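The epoll event masks added to the uapi header above are the constants userspace programs pass to epoll_ctl(). A minimal usage sketch (demo_watch_fd() is a hypothetical helper, not part of the patch):

#include <sys/epoll.h>

/* Register fd for readable and peer-hangup events; returns 0 or -1
 * like epoll_ctl() itself. */
static int demo_watch_fd(int epfd, int fd)
{
	struct epoll_event ev = {
		.events = EPOLLIN | EPOLLRDHUP,
		.data.fd = fd,
	};

	return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
}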
@@ -742,7 +742,10 @@ static inline int convert_mode(long *msgtyp, int msgflg)
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */
			*msgtyp = LONG_MAX;
		else
			*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
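As with f_setown() earlier, the msgrcv() fix avoids negating the one value whose negation cannot be represented. A userspace rendering of the new logic (illustrative only):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	long msgtyp = LONG_MIN;

	/* -LONG_MIN is undefined: the positive value does not fit in a
	 * long. Mapping it to LONG_MAX preserves msgrcv()'s
	 * "everything <= |msgtyp|" search semantics. */
	if (msgtyp == LONG_MIN)
		msgtyp = LONG_MAX;
	else
		msgtyp = -msgtyp;

	printf("%ld\n", msgtyp);
	return 0;
}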
@@ -2289,6 +2289,7 @@ void __dl_clear_params(struct task_struct *p)
	dl_se->dl_period = 0;
	dl_se->flags = 0;
	dl_se->dl_bw = 0;
	dl_se->dl_density = 0;

	dl_se->dl_throttled = 0;
	dl_se->dl_new = 1;

@@ -3991,6 +3992,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
	dl_se->flags = attr->sched_flags;
	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);

	/*
	 * Changing the parameters of a task is 'tricky' and we're not doing

@@ -502,13 +502,84 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
}

/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime so that it does not overrun its
 * density.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 * Reasoning: a task may overrun the density if:
 *	runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *	runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * In this way, runtime will be equal to the maximum the task can use
 * without breaking any rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
	u64 laxity = dl_se->deadline - rq_clock(rq);

	/*
	 * If the task has deadline < period, and the deadline is in the past,
	 * it should already be throttled before this check.
	 *
	 * See update_dl_entity() comments for further details.
	 */
	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

	dl_se->runtime = (dl_se->dl_density * laxity) >> 20;
}

/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only for tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_deadline == dl_se->dl_period;
}

/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revisited CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied: the runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revisited CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments of the update_dl_revised_wakeup() function to
 * find out more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)

@@ -530,6 +601,14 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {

		if (unlikely(!dl_is_implicit(dl_se) &&
			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
			     !dl_se->dl_boosted)) {
			update_dl_revised_wakeup(dl_se, rq);
			return;
		}

		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

@@ -1054,11 +1133,6 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
	__dequeue_dl_entity(dl_se);
}

static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_deadline < dl_se->dl_period;
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);

@@ -1090,7 +1164,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
	 * If that is the case, the task will be throttled and
	 * the replenishment timer will be set to the next period.
	 */
	if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
		dl_check_constrained_dl(&p->dl);

	/*
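The >> 20 in update_dl_revised_wakeup() pairs with to_ratio(), which stores the runtime/deadline ratio in 20-bit fixed point. A worked numeric example of the revised rule, assuming to_ratio()'s shift-by-20 definition from kernel/sched/core.c (to_ratio_demo() is an illustrative stand-in; values in nanoseconds):

#include <stdint.h>
#include <stdio.h>

static uint64_t to_ratio_demo(uint64_t period, uint64_t runtime)
{
	return (runtime << 20) / period;	/* ratio in Q20 fixed point */
}

int main(void)
{
	uint64_t dl_runtime  = 10000000;	/* 10 ms per instance */
	uint64_t dl_deadline = 40000000;	/* 40 ms relative deadline */
	uint64_t dl_density  = to_ratio_demo(dl_deadline, dl_runtime);
	uint64_t laxity      = 20000000;	/* deadline - now = 20 ms */

	/* revised wakeup rule: runtime = density * laxity */
	uint64_t runtime = (dl_density * laxity) >> 20;

	/* density = 0.25, so 20 ms of laxity allows 5 ms of runtime */
	printf("%llu\n", (unsigned long long)runtime);	/* 5000000 */
	return 0;
}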
@@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(__ktime_divns);
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);
	ktime_t res = ktime_add_unsafe(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can

@@ -669,7 +669,9 @@ static void hrtimer_reprogram(struct hrtimer *timer,
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hang_detected = 0;
	base->hres_active = 0;
	base->next_timer = NULL;
}

/*

@@ -1587,6 +1589,7 @@ static void init_hrtimers_cpu(int cpu)
		timerqueue_init_head(&cpu_base->clock_base[i].active);
	}

	cpu_base->active_bases = 0;
	cpu_base->cpu = cpu;
	hrtimer_init_hres(cpu_base);
}

mm/cma.c

@@ -55,7 +55,7 @@ unsigned long cma_get_size(const struct cma *cma)
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

@@ -63,17 +63,14 @@ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
}

/*
 * Find a PFN aligned to the specified order and return an offset represented in
 * order_per_bits.
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
					       unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
	return (cma->base_pfn & ((1UL << align_order) - 1))
	       >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
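The corrected offset above is measured from the alignment boundary rounded *down* below base_pfn, whereas the old ALIGN()-based expression measured up to the next boundary. A worked example with order_per_bit = 0 for simplicity (illustrative only):

#include <assert.h>

int main(void)
{
	unsigned long base_pfn = 1030;	/* not 16-aligned: 64*16 + 6 */
	unsigned int align_order = 4;	/* 16-page alignment */

	/* new formula: offset from the boundary below (1024) */
	unsigned long new_off = base_pfn & ((1UL << align_order) - 1);
	/* old formula: distance up to the boundary above (1040) */
	unsigned long old_off = ((base_pfn + 15) & ~15UL) - base_pfn;

	assert(new_off == 6);	/* correct bitmap bias */
	assert(old_off == 10);	/* the wrong bias the fix removes */
	return 0;
}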
@@ -5576,7 +5576,7 @@ static void uncharge_list(struct list_head *page_list)
		next = page->lru.next;

		VM_BUG_ON_PAGE(PageLRU(page), page);
		VM_BUG_ON_PAGE(page_count(page), page);
		VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);

		if (!page->mem_cgroup)
			continue;

@@ -539,6 +539,13 @@ static int delete_from_lru_cache(struct page *p)
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);

		/*
		 * Poisoned page might never drop its ref count to 0 so we have
		 * to uncharge it manually from its memcg.
		 */
		mem_cgroup_uncharge(p);

		/*
		 * drop the page count elevated by isolate_lru_page()
		 */

@@ -2218,7 +2218,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
		gap_addr = TASK_SIZE;

	next = vma->vm_next;
	if (next && next->vm_start < gap_addr) {
	if (next && next->vm_start < gap_addr &&
	    (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
		if (!(next->vm_flags & VM_GROWSUP))
			return -ENOMEM;
		/* Check that both stack segments have the same anon_vma? */

@@ -2303,7 +2304,8 @@ int expand_downwards(struct vm_area_struct *vma,
	if (gap_addr > address)
		return -ENOMEM;
	prev = vma->vm_prev;
	if (prev && prev->vm_end > gap_addr) {
	if (prev && prev->vm_end > gap_addr &&
	    (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
		if (!(prev->vm_flags & VM_GROWSDOWN))
			return -ENOMEM;
		/* Check that both stack segments have the same anon_vma? */

@@ -2545,9 +2545,6 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
		if (!area->nr_free)
			continue;

		if (alloc_harder)
			return true;

		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
#ifdef CONFIG_CMA
		/*

@@ -2567,6 +2564,9 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
			return true;
		}
#endif
		if (alloc_harder &&
		    !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
			return true;
	}
	return false;
}

@@ -722,13 +722,12 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
	if (unlikely(!net_eq(dev_net(dev), &init_net)))
		goto drop;

	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
		      skb->len != CAN_MTU ||
		      cfd->len > CAN_MAX_DLEN,
		      "PF_CAN: dropped non conform CAN skbuf: "
		      "dev type %d, len %d, datalen %d\n",
		      dev->type, skb->len, cfd->len))
	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
		     cfd->len > CAN_MAX_DLEN)) {
		pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
			     dev->type, skb->len, cfd->len);
		goto drop;
	}

	can_receive(skb, dev);
	return NET_RX_SUCCESS;

@@ -746,13 +745,12 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
	if (unlikely(!net_eq(dev_net(dev), &init_net)))
		goto drop;

	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
		      skb->len != CANFD_MTU ||
		      cfd->len > CANFD_MAX_DLEN,
		      "PF_CAN: dropped non conform CAN FD skbuf: "
		      "dev type %d, len %d, datalen %d\n",
		      dev->type, skb->len, cfd->len))
	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
		     cfd->len > CANFD_MAX_DLEN)) {
		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
			     dev->type, skb->len, cfd->len);
		goto drop;
	}

	can_receive(skb, dev);
	return NET_RX_SUCCESS;

@@ -2895,10 +2895,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			const struct tcphdr *th;
			struct tcphdr _tcphdr;

			th = skb_header_pointer(skb, skb_transport_offset(skb),
						sizeof(_tcphdr), &_tcphdr);
			if (likely(th))
				hdr_len += __tcp_hdrlen(th);
		} else {
			struct udphdr _udphdr;

			if (skb_header_pointer(skb, skb_transport_offset(skb),
					       sizeof(_udphdr), &_udphdr))
				hdr_len += sizeof(struct udphdr);
		}

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
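The qdisc_pkt_len_init() hunk above stops trusting that an untrusted GSO packet actually carries the transport header it claims. The defensive idiom, distilled into a minimal sketch (demo_transport_hdrlen() is an illustrative name; skb_header_pointer() copies the bytes into the local buffer when they are not linearly accessible and returns NULL when the packet is too short):

#include <linux/skbuff.h>
#include <linux/tcp.h>

static unsigned int demo_transport_hdrlen(const struct sk_buff *skb)
{
	const struct tcphdr *th;
	struct tcphdr _tcphdr;

	th = skb_header_pointer(skb, skb_transport_offset(skb),
				sizeof(_tcphdr), &_tcphdr);
	/* Too-short packet: skip the adjustment instead of reading
	 * past the end of the header. */
	return th ? __tcp_hdrlen(th) : 0;
}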
@@ -527,8 +527,8 @@ ip_proto_again:
out_good:
	ret = true;

	key_control->thoff = (u16)nhoff;
out:
	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;

@@ -536,7 +536,6 @@ out:

out_bad:
	ret = false;
	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
	goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);

@@ -496,7 +496,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);

@@ -508,7 +508,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
					    lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;

@@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)

	ccid2_pr_debug("RTO_EXPIRE\n");

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	/* back-off timer */
	hc->tx_rto <<= 1;
	if (hc->tx_rto > DCCP_RTO_MAX)

@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)

static int arp_constructor(struct neighbour *neigh)
{
	__be32 addr = *(__be32 *)neigh->primary_key;
	__be32 addr;
	struct net_device *dev = neigh->dev;
	struct in_device *in_dev;
	struct neigh_parms *parms;
	u32 inaddr_any = INADDR_ANY;

	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
		memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);

	addr = *(__be32 *)neigh->primary_key;
	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {

@@ -338,7 +338,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
		return htonl(INADDR_ANY);

	for_ifa(in_dev) {
		if (inet_ifa_match(fl4->saddr, ifa))
		if (fl4->saddr == ifa->ifa_local)
			return fl4->saddr;
	} endfor_ifa(in_dev);
@@ -367,23 +367,12 @@ static inline bool unconditional(const struct arpt_entry *e)
	       memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
}

static bool find_jump_target(const struct xt_table_info *t,
			     const struct arpt_entry *target)
{
	struct arpt_entry *iter;

	xt_entry_foreach(iter, t->entries, t->size) {
		if (iter == target)
			return true;
	}
	return false;
}

/* Figures out from what hook each rule can be called: returns 0 if
 * there are loops. Puts hook bitmask in comefrom.
 */
static int mark_source_chains(const struct xt_table_info *newinfo,
			      unsigned int valid_hooks, void *entry0)
			      unsigned int valid_hooks, void *entry0,
			      unsigned int *offsets)
{
	unsigned int hook;

@@ -472,10 +461,11 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
				/* This a jump; chase it. */
				duprintf("Jump rule %u -> %u\n",
					 pos, newpos);
				if (!xt_find_jump_offset(offsets, newpos,
							 newinfo->number))
					return 0;
				e = (struct arpt_entry *)
					(entry0 + newpos);
				if (!find_jump_target(newinfo, e))
					return 0;
			} else {
				/* ... this is a fallthru */
				newpos = pos + e->next_offset;

@@ -521,11 +511,13 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned long pcnt;
	int ret;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
	pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(pcnt))
		return -ENOMEM;
	e->counters.pcnt = pcnt;

	t = arpt_get_target(e);
	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,

@@ -642,6 +634,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
			   const struct arpt_replace *repl)
{
	struct arpt_entry *iter;
	unsigned int *offsets;
	unsigned int i;
	int ret = 0;

@@ -655,6 +648,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	offsets = xt_alloc_entry_offsets(newinfo->number);
	if (!offsets)
		return -ENOMEM;
	i = 0;

	/* Walk through entries, checking offsets. */

@@ -665,7 +661,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			break;
			goto out_free;
		if (i < repl->num_entries)
			offsets[i] = (void *)iter - entry0;
		++i;
		if (strcmp(arpt_get_target(iter)->u.user.name,
			   XT_ERROR_TARGET) == 0)

@@ -673,12 +671,13 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
	}
	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
	if (ret != 0)
		return ret;
		goto out_free;

	ret = -EINVAL;
	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
		goto out_free;
	}

	/* Check hooks all assigned */

@@ -689,17 +688,20 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
			goto out_free;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
			goto out_free;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;
	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
		ret = -ELOOP;
		goto out_free;
	}
	kvfree(offsets);

	/* Finally, each sanity check must pass */
	i = 0;

@@ -719,6 +721,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
		return ret;
	}

	return ret;
out_free:
	kvfree(offsets);
	return ret;
}

@@ -1336,8 +1341,8 @@ static int translate_compat_table(struct xt_table_info **pinfo,

	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
@@ -443,23 +443,12 @@ ipt_do_table(struct sk_buff *skb,
#endif
}

static bool find_jump_target(const struct xt_table_info *t,
			     const struct ipt_entry *target)
{
	struct ipt_entry *iter;

	xt_entry_foreach(iter, t->entries, t->size) {
		if (iter == target)
			return true;
	}
	return false;
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
		   unsigned int valid_hooks, void *entry0,
		   unsigned int *offsets)
{
	unsigned int hook;

@@ -552,10 +541,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
				/* This a jump; chase it. */
				duprintf("Jump rule %u -> %u\n",
					 pos, newpos);
				if (!xt_find_jump_offset(offsets, newpos,
							 newinfo->number))
					return 0;
				e = (struct ipt_entry *)
					(entry0 + newpos);
				if (!find_jump_target(newinfo, e))
					return 0;
			} else {
				/* ... this is a fallthru */
				newpos = pos + e->next_offset;

@@ -663,10 +653,12 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;
	unsigned long pcnt;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
	pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(pcnt))
		return -ENOMEM;
	e->counters.pcnt = pcnt;

	j = 0;
	mtpar.net = net;

@@ -811,6 +803,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int *offsets;
	unsigned int i;
	int ret = 0;

@@ -824,6 +817,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	offsets = xt_alloc_entry_offsets(newinfo->number);
	if (!offsets)
		return -ENOMEM;
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {

@@ -833,17 +829,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
			goto out_free;
		if (i < repl->num_entries)
			offsets[i] = (void *)iter - entry0;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
			   XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	ret = -EINVAL;
	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
		goto out_free;
	}

	/* Check hooks all assigned */

@@ -854,17 +853,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
			goto out_free;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
			goto out_free;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;
	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
		ret = -ELOOP;
		goto out_free;
	}
	kvfree(offsets);

	/* Finally, each sanity check must pass */
	i = 0;

@@ -884,6 +886,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		return ret;
	}

	return ret;
out_free:
	kvfree(offsets);
	return ret;
}
@@ -124,6 +124,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
	/* ip_route_me_harder expects skb->dst to be set */
	skb_dst_set_noref(nskb, skb_dst(oldskb));

	nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				   ip4_dst_hoplimit(skb_dst(nskb)));

@@ -2185,6 +2185,9 @@ adjudge_to_death:
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPABORTONMEMORY);
		} else if (!check_net(sock_net(sk))) {
			/* Not possible to send reset; just close */
			tcp_set_state(sk, TCP_CLOSE);
		}
	}

@@ -80,11 +80,19 @@ static void tcp_write_err(struct sock *sk)
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on orphaned socket.
 *
 * Also close if our net namespace is exiting; in that case there is no
 * hope of ever communicating again since all netns interfaces are already
 * down (or about to be down), and we need to release our dst references,
 * which have been moved to the netns loopback interface, so the namespace
 * can finish exiting. This condition is only possible if we are a kernel
 * socket, as those do not hold references to the namespace.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. the number of orphaned sockets exceeds an administratively configured
 *    limit,
 * 2. we are under strong memory pressure, or
 * 3. our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{

@@ -113,6 +121,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}
@@ -148,7 +148,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
		    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
{
	if (!np->autoflowlabel_set)
		return ip6_default_np_autolabel(net);

@@ -1246,14 +1246,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
	v6_cork->tclass = tclass;
	if (rt->dst.flags & DST_XFRM_TUNNEL)
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
	else
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
	if (np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	if (mtu < IPV6_MIN_MTU)
		return -EINVAL;
	cork->base.fragsize = mtu;
	if (dst_allfrag(rt->dst.path))
		cork->base.flags |= IPCORK_ALLFRAG;

@@ -1783,6 +1785,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
	cork.base.flags = 0;
	cork.base.addr = 0;
	cork.base.opt = NULL;
	cork.base.dst = NULL;
	v6_cork.opt = NULL;
	err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
	if (err) {

@@ -1313,7 +1313,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
		break;

	case IPV6_AUTOFLOWLABEL:
		val = np->autoflowlabel;
		val = ip6_autoflowlabel(sock_net(sk), np);
		break;

	default:
@@ -459,23 +459,12 @@ ip6t_do_table(struct sk_buff *skb,
#endif
}

static bool find_jump_target(const struct xt_table_info *t,
			     const struct ip6t_entry *target)
{
	struct ip6t_entry *iter;

	xt_entry_foreach(iter, t->entries, t->size) {
		if (iter == target)
			return true;
	}
	return false;
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
		   unsigned int valid_hooks, void *entry0,
		   unsigned int *offsets)
{
	unsigned int hook;

@@ -568,10 +557,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
				/* This a jump; chase it. */
				duprintf("Jump rule %u -> %u\n",
					 pos, newpos);
				if (!xt_find_jump_offset(offsets, newpos,
							 newinfo->number))
					return 0;
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				if (!find_jump_target(newinfo, e))
					return 0;
			} else {
				/* ... this is a fallthru */
				newpos = pos + e->next_offset;

@@ -680,10 +670,12 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;
	unsigned long pcnt;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
	pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(pcnt))
		return -ENOMEM;
	e->counters.pcnt = pcnt;

	j = 0;
	mtpar.net = net;

@@ -827,6 +819,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int *offsets;
	unsigned int i;
	int ret = 0;

@@ -840,6 +833,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	offsets = xt_alloc_entry_offsets(newinfo->number);
	if (!offsets)
		return -ENOMEM;
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {

@@ -849,17 +845,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
			goto out_free;
		if (i < repl->num_entries)
			offsets[i] = (void *)iter - entry0;
		++i;
		if (strcmp(ip6t_get_target(iter)->u.user.name,
			   XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	ret = -EINVAL;
	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
		goto out_free;
	}

	/* Check hooks all assigned */

@@ -870,17 +869,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
			goto out_free;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
			goto out_free;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;
	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
		ret = -ELOOP;
		goto out_free;
	}
	kvfree(offsets);

	/* Finally, each sanity check must pass */
	i = 0;

@@ -900,6 +902,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		return ret;
	}

	return ret;
out_free:
	kvfree(offsets);
	return ret;
}
@@ -33,6 +33,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
	fl6.daddr = *gw;
	fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
					 (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
	fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);

@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
	fl6.daddr = oip6h->saddr;
	fl6.fl6_sport = otcph->dest;
	fl6.fl6_dport = otcph->source;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == NULL || dst->error) {

@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)

	skb_dst_set(nskb, dst);

	nskb->mark = fl6.flowi6_mark;

	skb_reserve(nskb, hh_len + dst->header_len);
	ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
				    ip6_dst_hoplimit(dst));

@@ -719,6 +719,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
	 * least once for the stats anyway.
	 */
	rcu_read_lock_bh();
 begin:
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (ct != ignored_conntrack &&

@@ -730,6 +731,12 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
		}
		NF_CT_STAT_INC(net, searched);
	}

	if (get_nulls_value(n) != hash) {
		NF_CT_STAT_INC(net, search_restart);
		goto begin;
	}

	rcu_read_unlock_bh();

	return 0;

@@ -560,7 +560,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name)
		if (helper->expect_policy[expect->class].name[0])
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

@@ -1435,9 +1435,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
		handler = &sip_handlers[i];
		if (handler->request == NULL)
			continue;
		if (*datalen < handler->len ||
		if (*datalen < handler->len + 2 ||
		    strncasecmp(*dptr, handler->method, handler->len))
			continue;
		if ((*dptr)[handler->len] != ' ' ||
		    !isalpha((*dptr)[handler->len+1]))
			continue;

		if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
				      &matchoff, &matchlen) <= 0) {
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/capability.h>
#include <net/netlink.h>
#include <net/sock.h>

@@ -392,6 +393,9 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
	struct nfnl_cthelper *nlcth;
	int ret = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
		return -EINVAL;

@@ -595,6 +599,9 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
	struct nfnl_cthelper *nlcth;
	bool tuple_set = false;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = nfnl_cthelper_dump_table,

@@ -661,6 +668,9 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
	struct nfnl_cthelper *nlcth, *n;
	int j = 0, ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (tb[NFCTH_NAME])
		helper_name = nla_data(tb[NFCTH_NAME]);

@@ -1053,10 +1053,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
	struct net *net = sock_net(ctnl);
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	queue = instance_lookup(q, queue_num);
	if (!queue)
		queue = verdict_instance_lookup(q, queue_num,
						NETLINK_CB(skb).portid);
	queue = verdict_instance_lookup(q, queue_num,
					NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);
@@ -701,6 +701,56 @@ int xt_check_entry_offsets(const void *base,
}
EXPORT_SYMBOL(xt_check_entry_offsets);

/**
 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 *
 * @size: number of entries
 *
 * Return: NULL or kmalloc'd or vmalloc'd array
 */
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
	unsigned int *off;

	off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);

	if (off)
		return off;

	if (size < (SIZE_MAX / sizeof(unsigned int)))
		off = vmalloc(size * sizeof(unsigned int));

	return off;
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);

/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all valid rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: entries in @offset
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int m, low = 0, hi = size;

	while (hi > low) {
		m = (low + hi) / 2u;

		if (offsets[m] > target)
			hi = m;
		else if (offsets[m] < target)
			low = m + 1;
		else
			return true;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);

int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
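The contract behind xt_find_jump_offset() is that offsets[] holds every valid rule start offset in ascending order, so a verdict jump is legal only if it lands exactly on one of them. A standalone copy of the binary search with a small usage check (demo_find_jump_offset() is an illustrative name; the offsets are made-up rule sizes):

#include <assert.h>
#include <stdbool.h>

static bool demo_find_jump_offset(const unsigned int *offsets,
				  unsigned int target, unsigned int size)
{
	int m, low = 0, hi = size;

	while (hi > low) {
		m = (low + hi) / 2u;

		if (offsets[m] > target)
			hi = m;
		else if (offsets[m] < target)
			low = m + 1;
		else
			return true;
	}
	return false;
}

int main(void)
{
	unsigned int offsets[] = { 0, 112, 264, 432 };	/* rule heads */

	assert(demo_find_jump_offset(offsets, 264, 4));	 /* valid jump */
	assert(!demo_find_jump_offset(offsets, 265, 4)); /* mid-rule: rejected */
	return 0;
}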
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/kernel.h>

#include <linux/capability.h>
#include <linux/if.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>

@@ -69,6 +70,9 @@ static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb,
	struct xt_osf_finger *kf = NULL, *sf;
	int err = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!osf_attrs[OSF_ATTR_FINGER])
		return -EINVAL;

@@ -112,6 +116,9 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
	struct xt_osf_finger *sf;
	int err = -ENOENT;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!osf_attrs[OSF_ATTR_FINGER])
		return -EINVAL;
@@ -83,7 +83,7 @@
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
				size_t msg_len, struct sock **orig_sk);
				size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);

@@ -332,16 +332,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
	if (len < sizeof (struct sockaddr))
		return NULL;

	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
		return NULL;

	/* V4 mapped address are really of AF_INET family */
	if (addr->sa.sa_family == AF_INET6 &&
	    ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
		if (!opt->pf->af_supported(AF_INET, opt))
			return NULL;
	} else {
		/* Does this PF support this AF? */
		if (!opt->pf->af_supported(addr->sa.sa_family, opt))
			return NULL;
	}
	    ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
	    !opt->pf->af_supported(AF_INET, opt))
		return NULL;

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

@@ -1954,7 +1952,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	if (!sctp_wspace(asoc)) {
		/* sk can be changed by peel off when waiting for buf. */
		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
		if (err) {
			if (err == -ESRCH) {
				/* asoc is already dead. */

@@ -6976,12 +6974,12 @@ void sctp_sock_rfree(struct sk_buff *skb)

/* Helper function to wait for space in the sndbuf. */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
				size_t msg_len, struct sock **orig_sk)
				size_t msg_len)
{
	struct sock *sk = asoc->base.sk;
	int err = 0;
	long current_timeo = *timeo_p;
	DEFINE_WAIT(wait);
	int err = 0;

	pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
		 *timeo_p, msg_len);

@@ -7010,17 +7008,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
		release_sock(sk);
		current_timeo = schedule_timeout(current_timeo);
		lock_sock(sk);
		if (sk != asoc->base.sk) {
			release_sock(sk);
			sk = asoc->base.sk;
			lock_sock(sk);
		}
		if (sk != asoc->base.sk)
			goto do_error;

		*timeo_p = current_timeo;
	}

out:
	*orig_sk = sk;
	finish_wait(&asoc->wait, &wait);

	/* Release the association's refcnt. */
@@ -215,9 +215,16 @@ int read_usb_interface(struct usbip_usb_device *udev, int i,
 			struct usbip_usb_interface *uinf)
 {
 	char busid[SYSFS_BUS_ID_SIZE];
+	int size;
 	struct udev_device *sif;
 
-	sprintf(busid, "%s:%d.%d", udev->busid, udev->bConfigurationValue, i);
+	size = snprintf(busid, sizeof(busid), "%s:%d.%d",
+			udev->busid, udev->bConfigurationValue, i);
+	if (size < 0 || (unsigned int)size >= sizeof(busid)) {
+		err("busid length %i >= %lu or < 0", size,
+		    (unsigned long)sizeof(busid));
+		return -1;
+	}
 
 	sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid);
 	if (!sif) {
@@ -39,13 +39,19 @@ struct udev *udev_context;
 static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
 {
 	char status_attr_path[SYSFS_PATH_MAX];
+	int size;
 	int fd;
 	int length;
 	char status;
 	int value = 0;
 
-	snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
-		 udev->path);
+	size = snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
+			udev->path);
+	if (size < 0 || (unsigned int)size >= sizeof(status_attr_path)) {
+		err("usbip_status path length %i >= %lu or < 0", size,
+		    (unsigned long)sizeof(status_attr_path));
+		return -1;
+	}
 
 	fd = open(status_attr_path, O_RDONLY);
 	if (fd < 0) {
@@ -225,6 +231,7 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
 {
 	char attr_name[] = "usbip_sockfd";
 	char sockfd_attr_path[SYSFS_PATH_MAX];
+	int size;
 	char sockfd_buff[30];
 	int ret;
 
@@ -244,10 +251,20 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
 	}
 
 	/* only the first interface is true */
-	snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
-		 edev->udev.path, attr_name);
+	size = snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
+			edev->udev.path, attr_name);
+	if (size < 0 || (unsigned int)size >= sizeof(sockfd_attr_path)) {
+		err("exported device path length %i >= %lu or < 0", size,
+		    (unsigned long)sizeof(sockfd_attr_path));
+		return -1;
+	}
 
-	snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
+	size = snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
+	if (size < 0 || (unsigned int)size >= sizeof(sockfd_buff)) {
+		err("socket length %i >= %lu or < 0", size,
+		    (unsigned long)sizeof(sockfd_buff));
+		return -1;
+	}
 
 	ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff,
 				    strlen(sockfd_buff));
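All three usbip call sites adopt the same idiom: snprintf returns the length the formatted string would have had, so a negative return or one >= the buffer size flags an encoding error or truncation before the path is ever used. A self-contained illustration of the check (the tiny buffer is deliberate, to force the truncated case):

/* Illustrative only: the truncation check used by the patched usbip
 * tools. snprintf() reports the would-be length, so a result of
 * sizeof(buf) or more means the output did not fit. */
#include <stdio.h>

int main(void)
{
	char buf[8];
	int size = snprintf(buf, sizeof(buf), "%s:%d.%d", "usb3-2", 1, 0);

	if (size < 0 || (unsigned int)size >= sizeof(buf))
		fprintf(stderr, "busid truncated (needed %d bytes)\n", size);
	else
		printf("busid ok: %s\n", buf);
	return 0;
}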
@@ -55,12 +55,12 @@ static int parse_status(const char *value)
 
 	while (*c != '\0') {
 		int port, status, speed, devid;
-		unsigned long socket;
+		int sockfd;
 		char lbusid[SYSFS_BUS_ID_SIZE];
 
-		ret = sscanf(c, "%d %d %d %x %lx %31s\n",
+		ret = sscanf(c, "%d %d %d %x %u %31s\n",
 			     &port, &status, &speed,
-			     &devid, &socket, lbusid);
+			     &devid, &sockfd, lbusid);
 
 		if (ret < 5) {
 			dbg("sscanf failed: %d", ret);
@@ -69,7 +69,7 @@ static int parse_status(const char *value)
 
 		dbg("port %d status %d speed %d devid %x",
 		    port, status, speed, devid);
-		dbg("socket %lx lbusid %s", socket, lbusid);
+		dbg("sockfd %u lbusid %s", sockfd, lbusid);
 
 
 		/* if a device is connected, look at it */
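Because the kernel side now publishes a socket file descriptor instead of a raw kernel pointer, the tool parses %u into a plain fd rather than %lx into an unsigned long. A toy parse of one status line under the new format (the sample values are made up):

/* Illustrative only: parsing a vhci status line once the kernel
 * reports a sockfd instead of a kernel pointer. */
#include <stdio.h>

int main(void)
{
	int port, status, speed, devid, ret;
	unsigned int sockfd;
	char lbusid[32];

	ret = sscanf("0 1 2 10002 6 1-1", "%d %d %d %x %u %31s",
		     &port, &status, &speed, &devid, &sockfd, lbusid);
	printf("ret %d port %d sockfd %u busid %s\n",
	       ret, port, sockfd, lbusid);
	return 0;
}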
@@ -176,6 +176,8 @@ int main(int argc, char *argv[])
 			break;
 		case '?':
 			printf("usbip: invalid option\n");
+			/* Terminate after printing error */
+			/* FALLTHRU */
 		default:
 			usbip_usage();
 			goto out;
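The added /* FALLTHRU */ is functional as far as the build is concerned: GCC's -Wimplicit-fallthrough accepts a small set of marker comments, this spelling among them, and suppresses the warning for the annotated case. A minimal demonstration, assuming gcc with -Wextra or -Wimplicit-fallthrough:

/* Illustrative only: an annotated switch fallthrough that compiles
 * cleanly under gcc -Wextra / -Wimplicit-fallthrough. */
#include <stdio.h>

static void handle(int opt)
{
	switch (opt) {
	case '?':
		printf("invalid option\n");
		/* FALLTHRU */
	default:
		printf("usage: ...\n");
	}
}

int main(void)
{
	handle('?');
	return 0;
}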