This is the 4.4.76 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAllc3f0ACgkQONu9yGCS
aT4fmA/+OHeYbhpaMRKqrUpsxB3NpROr2Z47ow6vaVjYZzd0irrODLlfIfDQ6EEo
N3v28povu16VeYXk+4h8bsAP2K2j6/BlRaSi2hB6dmnY8GDMaXEfRojPYAlzVz50
qnK/6152siDDarUx1h5Zc8GcmX/tEl6h3bOOxDcwLR+RvyIcWxenuR+uqRM/AV6o
BPEiOuMu7P6LjID7KYgBTFNajVBMLrDXt4SCWdzOZmlNt0QXgKB9yw68vTcc+edC
ZcXqa0M6nEWSDvwobbwBZhFL8H2dJjzweyjeFBgxnxgmOrRh6kvZG2wsz2c8O3/P
g8TuMxU7siu+I3lFwKy+dgZ/1REz+6Q3oFBqXsuddrcPYu23rV6mz/GxqWy4cerb
M4eTWz6L9vA2GoYpvBaWi0tKC9tkNM49g48Y24a6CW1O4dJWlz3RrpTiZmequbNF
mo8EKomSXn4kYAm1xT03DGljQkK/i2JtyI5sk2hLEqqxKvZ/3q9xxLLKOVx8dPvs
PIbfpapfYMXXMWgR6e+UKueNLgevfWE12X/OU4SgvSY4n/07/mH40XEd3zd82IsZ
1Mw0qj3JnqCAFDBBMsDYa+OvABaGD1dHARuiv+aeqW8tqoBglFHxWqF+SQVNXLIE
qTLiKz78vjQpH0zGpkA3HEOh/h4L7a0y3qRMECsk5SUxXsgu1gg=
=bwNU
-----END PGP SIGNATURE-----

Merge 4.4.76 into android-4.4

Changes in 4.4.76:
ipv6: release dst on error in ip6_dst_lookup_tail
net: don't call strlen on non-terminated string in dev_set_alias()
decnet: dn_rtmsg: Improve input length sanitization in dnrmg_receive_user_skb
net: Zero ifla_vf_info in rtnl_fill_vfinfo()
af_unix: Add sockaddr length checks before accessing sa_family in bind and connect handlers
Fix an intermittent pr_emerg warning about lo becoming free.
net: caif: Fix a sleep-in-atomic bug in cfpkt_create_pfx
igmp: acquire pmc lock for ip_mc_clear_src()
igmp: add a missing spin_lock_init()
ipv6: fix calling in6_ifa_hold incorrectly for dad work
net/mlx5: Wait for FW readiness before initializing command interface
decnet: always not take dst->__refcnt when inserting dst into hash table
net: 8021q: Fix one possible panic caused by BUG_ON in free_netdev
sfc: provide dummy definitions of vswitch functions
ipv6: Do not leak throw route references
rtnetlink: add IFLA_GROUP to ifla_policy
netfilter: xt_TCPMSS: add more sanity tests on tcph->doff
netfilter: synproxy: fix conntrackd interaction
NFSv4: fix a reference leak caused WARNING messages
drm/ast: Handle configuration without P2A bridge
mm, swap_cgroup: reschedule when neeed in swap_cgroup_swapoff()
MIPS: Avoid accidental raw backtrace
MIPS: pm-cps: Drop manual cache-line alignment of ready_count
MIPS: Fix IRQ tracing & lockdep when rescheduling
ALSA: hda - Fix endless loop of codec configure
ALSA: hda - set input_path bitmap to zero after moving it to new place
drm/vmwgfx: Free hash table allocated by cmdbuf managed res mgr
usb: gadget: f_fs: Fix possibe deadlock
sysctl: enable strict writes
block: fix module reference leak on put_disk() call for cgroups throttle
mm: numa: avoid waiting on freed migrated pages
KVM: x86: fix fixing of hypercalls
scsi: sd: Fix wrong DPOFUA disable in sd_read_cache_type
scsi: lpfc: Set elsiocb contexts to NULL after freeing it
qla2xxx: Fix erroneous invalid handle message
ARM: dts: BCM5301X: Correct GIC_PPI interrupt flags
net: mvneta: Fix for_each_present_cpu usage
MIPS: ath79: fix regression in PCI window initialization
net: korina: Fix NAPI versus resources freeing
MIPS: ralink: MT7688 pinmux fixes
MIPS: ralink: fix USB frequency scaling
MIPS: ralink: Fix invalid assignment of SoC type
MIPS: ralink: fix MT7628 pinmux typos
MIPS: ralink: fix MT7628 wled_an pinmux gpio
mtd: bcm47xxpart: limit scanned flash area on BCM47XX (MIPS) only
bgmac: fix a missing check for build_skb
mtd: bcm47xxpart: don't fail because of bit-flips
bgmac: Fix reversed test of build_skb() return value.
net: bgmac: Fix SOF bit checking
net: bgmac: Start transmit queue in bgmac_open
net: bgmac: Remove superflous netif_carrier_on()
powerpc/eeh: Enable IO path on permanent error
gianfar: Do not reuse pages from emergency reserve
Btrfs: fix truncate down when no_holes feature is enabled
virtio_console: fix a crash in config_work_handler
swiotlb-xen: update dev_addr after swapping pages
xen-netfront: Fix Rx stall during network stress and OOM
scsi: virtio_scsi: Reject commands when virtqueue is broken
platform/x86: ideapad-laptop: handle ACPI event 1
amd-xgbe: Check xgbe_init() return code
net: dsa: Check return value of phy_connect_direct()
drm/amdgpu: check ring being ready before using
vfio/spapr: fail tce_iommu_attach_group() when iommu_data is null
virtio_net: fix PAGE_SIZE > 64k
vxlan: do not age static remote mac entries
ibmveth: Add a proper check for the availability of the checksum features
kernel/panic.c: add missing \n
HID: i2c-hid: Add sleep between POWER ON and RESET
scsi: lpfc: avoid double free of resource identifiers
spi: davinci: use dma_mapping_error()
mac80211: initialize SMPS field in HT capabilities
x86/mpx: Use compatible types in comparison to fix sparse error
coredump: Ensure proper size of sparse core files
swiotlb: ensure that page-sized mappings are page-aligned
s390/ctl_reg: make __ctl_load a full memory barrier
be2net: fix status check in be_cmd_pmac_add()
perf probe: Fix to show correct locations for events on modules
net/mlx4_core: Eliminate warning messages for SRQ_LIMIT under SRIOV
sctp: check af before verify address in sctp_addr_id2transport
ravb: Fix use-after-free on `ifconfig eth0 down`
jump label: fix passing kbuild_cflags when checking for asm goto support
xfrm: fix stack access out of bounds with CONFIG_XFRM_SUB_POLICY
xfrm: NULL dereference on allocation failure
xfrm: Oops on error in pfkey_msg2xfrm_state()
watchdog: bcm281xx: Fix use of uninitialized spinlock.
sched/loadavg: Avoid loadavg spikes caused by delayed NO_HZ accounting
ARM64/ACPI: Fix BAD_MADT_GICC_ENTRY() macro implementation
ARM: 8685/1: ensure memblock-limit is pmd-aligned
x86/mpx: Correctly report do_mpx_bt_fault() failures to user-space
x86/mm: Fix flush_tlb_page() on Xen
ocfs2: o2hb: revert hb threshold to keep compatible
iommu/vt-d: Don't over-free page table directories
iommu: Handle default domain attach failure
iommu/amd: Fix incorrect error handling in amd_iommu_bind_pasid()
cpufreq: s3c2416: double free on driver init error path
KVM: x86: fix emulation of RSM and IRET instructions
KVM: x86/vPMU: fix undefined shift in intel_pmu_refresh()
KVM: x86: zero base3 of unusable segments
KVM: nVMX: Fix exception injection

Linux 4.4.76

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 64a73ff728
96 changed files with 669 additions and 444 deletions
@@ -825,14 +825,13 @@ via the /proc/sys interface:
       Each write syscall must fully contain the sysctl value to be
       written, and multiple writes on the same sysctl file descriptor
       will rewrite the sysctl value, regardless of file position.
-   0 - (default) Same behavior as above, but warn about processes that
-       perform writes to a sysctl file descriptor when the file position
-       is not 0.
-   1 - Respect file position when writing sysctl strings. Multiple writes
-       will append to the sysctl value buffer. Anything past the max length
-       of the sysctl value buffer will be ignored. Writes to numeric sysctl
-       entries must always be at file position 0 and the value must be
-       fully contained in the buffer sent in the write syscall.
+   0 - Same behavior as above, but warn about processes that perform writes
+       to a sysctl file descriptor when the file position is not 0.
+   1 - (default) Respect file position when writing sysctl strings. Multiple
+       writes will append to the sysctl value buffer. Anything past the max
+       length of the sysctl value buffer will be ignored. Writes to numeric
+       sysctl entries must always be at file position 0 and the value must
+       be fully contained in the buffer sent in the write syscall.
 
 ==============================================================

Makefile | 14 +++++++-------

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 75
+SUBLEVEL = 76
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
@@ -633,6 +633,12 @@ endif
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
 
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
 ifdef CONFIG_READABLE_ASM
 # Disable optimizations that make assembler listings hard to read.
 # reorder blocks reorders the control in the function
@@ -788,12 +794,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=date-time)
 # use the deterministic mode of AR if available
 KBUILD_ARFLAGS := $(call ar-option,D)
 
-# check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
-	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
-	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
-endif
-
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn

@@ -54,14 +54,14 @@
 	timer@0200 {
 		compatible = "arm,cortex-a9-global-timer";
 		reg = <0x0200 0x100>;
-		interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 		clocks = <&clk_periph>;
 	};
 
 	local-timer@0600 {
 		compatible = "arm,cortex-a9-twd-timer";
 		reg = <0x0600 0x100>;
-		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
 		clocks = <&clk_periph>;
 	};

@@ -1184,15 +1184,15 @@ void __init sanity_check_meminfo(void)
 
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
+	if (!memblock_limit)
+		memblock_limit = arm_lowmem_limit;
+
 	/*
 	 * Round the memblock limit down to a pmd size.  This
 	 * helps to ensure that we will allocate memory from the
 	 * last full pmd, which should be mapped.
 	 */
-	if (memblock_limit)
-		memblock_limit = round_down(memblock_limit, PMD_SIZE);
-	if (!memblock_limit)
-		memblock_limit = arm_lowmem_limit;
+	memblock_limit = round_down(memblock_limit, PMD_SIZE);
 
 	memblock_set_current_limit(memblock_limit);
 }

@@ -22,9 +22,9 @@
 #define ACPI_MADT_GICC_LENGTH	\
 	(acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
 
-#define BAD_MADT_GICC_ENTRY(entry, end)						\
-	(!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) ||	\
-	 (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+#define BAD_MADT_GICC_ENTRY(entry, end)						\
+	(!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH ||	\
+	 (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
 
 /* Basic configuration for ACPI */
 #ifdef CONFIG_ACPI

@@ -76,14 +76,14 @@ void ath79_ddr_set_pci_windows(void)
 {
 	BUG_ON(!ath79_ddr_pci_win_base);
 
-	__raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0);
-	__raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 1);
-	__raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 2);
-	__raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 3);
-	__raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 4);
-	__raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 5);
-	__raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 6);
-	__raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 7);
+	__raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0x0);
+	__raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 0x4);
+	__raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 0x8);
+	__raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 0xc);
+	__raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 0x10);
+	__raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 0x14);
+	__raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 0x18);
+	__raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 0x1c);
 }
 EXPORT_SYMBOL_GPL(ath79_ddr_set_pci_windows);

@@ -11,6 +11,7 @@
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/compiler.h>
+#include <asm/irqflags.h>
 #include <asm/regdef.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
@@ -137,6 +138,7 @@ work_pending:
 	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
 	beqz	t0, work_notifysig
 work_resched:
+	TRACE_IRQS_OFF
 	jal	schedule
 
 	local_irq_disable		# make sure need_resched and
@@ -173,6 +175,7 @@ syscall_exit_work:
 	beqz	t0, work_pending	# trace bit set?
 	local_irq_enable		# could let syscall_trace_leave()
 					# call schedule() instead
+	TRACE_IRQS_ON
 	move	a0, sp
 	jal	syscall_trace_leave
 	b	resume_userspace

@@ -55,7 +55,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
  * state. Actually per-core rather than per-CPU.
  */
 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
 
 /* Indicates online CPUs coupled with the current CPU */
 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -625,7 +624,6 @@ static int __init cps_gen_core_entries(unsigned cpu)
 {
 	enum cps_pm_state state;
 	unsigned core = cpu_data[cpu].core;
-	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
 	void *entry_fn, *core_rc;
 
 	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -645,16 +643,11 @@ static int __init cps_gen_core_entries(unsigned cpu)
 	}
 
 	if (!per_cpu(ready_count, core)) {
-		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
 		if (!core_rc) {
 			pr_err("Failed allocate core %u ready_count\n", core);
 			return -ENOMEM;
 		}
-		per_cpu(ready_count_alloc, core) = core_rc;
-
-		/* Ensure ready_count is aligned to a cacheline boundary */
-		core_rc += dlinesz - 1;
-		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
 		per_cpu(ready_count, core) = core_rc;
 	}
 

@@ -194,6 +194,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 {
 	struct pt_regs regs;
+	mm_segment_t old_fs = get_fs();
+
 	regs.cp0_status = KSU_KERNEL;
 	if (sp) {
 		regs.regs[29] = (unsigned long)sp;
 		regs.regs[31] = 0;

@@ -107,31 +107,31 @@ static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
 };
 
 static struct rt2880_pmx_func pwm1_grp_mt7628[] = {
-	FUNC("sdcx", 3, 19, 1),
+	FUNC("sdxc d6", 3, 19, 1),
 	FUNC("utif", 2, 19, 1),
 	FUNC("gpio", 1, 19, 1),
-	FUNC("pwm", 0, 19, 1),
+	FUNC("pwm1", 0, 19, 1),
 };
 
 static struct rt2880_pmx_func pwm0_grp_mt7628[] = {
-	FUNC("sdcx", 3, 18, 1),
+	FUNC("sdxc d7", 3, 18, 1),
 	FUNC("utif", 2, 18, 1),
 	FUNC("gpio", 1, 18, 1),
-	FUNC("pwm", 0, 18, 1),
+	FUNC("pwm0", 0, 18, 1),
 };
 
 static struct rt2880_pmx_func uart2_grp_mt7628[] = {
-	FUNC("sdcx", 3, 20, 2),
+	FUNC("sdxc d5 d4", 3, 20, 2),
 	FUNC("pwm", 2, 20, 2),
 	FUNC("gpio", 1, 20, 2),
-	FUNC("uart", 0, 20, 2),
+	FUNC("uart2", 0, 20, 2),
 };
 
 static struct rt2880_pmx_func uart1_grp_mt7628[] = {
-	FUNC("sdcx", 3, 45, 2),
+	FUNC("sw_r", 3, 45, 2),
 	FUNC("pwm", 2, 45, 2),
 	FUNC("gpio", 1, 45, 2),
-	FUNC("uart", 0, 45, 2),
+	FUNC("uart1", 0, 45, 2),
 };
 
 static struct rt2880_pmx_func i2c_grp_mt7628[] = {
@@ -143,21 +143,21 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
 
 static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
 static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
-static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 15, 38) };
+static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
 static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
 
 static struct rt2880_pmx_func sd_mode_grp_mt7628[] = {
 	FUNC("jtag", 3, 22, 8),
 	FUNC("utif", 2, 22, 8),
 	FUNC("gpio", 1, 22, 8),
-	FUNC("sdcx", 0, 22, 8),
+	FUNC("sdxc", 0, 22, 8),
 };
 
 static struct rt2880_pmx_func uart0_grp_mt7628[] = {
 	FUNC("-", 3, 12, 2),
 	FUNC("-", 2, 12, 2),
 	FUNC("gpio", 1, 12, 2),
-	FUNC("uart", 0, 12, 2),
+	FUNC("uart0", 0, 12, 2),
 };
 
 static struct rt2880_pmx_func i2s_grp_mt7628[] = {
@@ -171,7 +171,7 @@ static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
 	FUNC("-", 3, 6, 1),
 	FUNC("refclk", 2, 6, 1),
 	FUNC("gpio", 1, 6, 1),
-	FUNC("spi", 0, 6, 1),
+	FUNC("spi cs1", 0, 6, 1),
 };
 
 static struct rt2880_pmx_func spis_grp_mt7628[] = {
@@ -188,28 +188,44 @@ static struct rt2880_pmx_func gpio_grp_mt7628[] = {
 	FUNC("gpio", 0, 11, 1),
 };
 
-#define MT7628_GPIO_MODE_MASK	0x3
-
-#define MT7628_GPIO_MODE_PWM1	30
-#define MT7628_GPIO_MODE_PWM0	28
-#define MT7628_GPIO_MODE_UART2	26
-#define MT7628_GPIO_MODE_UART1	24
-#define MT7628_GPIO_MODE_I2C	20
-#define MT7628_GPIO_MODE_REFCLK	18
-#define MT7628_GPIO_MODE_PERST	16
-#define MT7628_GPIO_MODE_WDT	14
-#define MT7628_GPIO_MODE_SPI	12
-#define MT7628_GPIO_MODE_SDMODE	10
-#define MT7628_GPIO_MODE_UART0	8
-#define MT7628_GPIO_MODE_I2S	6
-#define MT7628_GPIO_MODE_CS1	4
-#define MT7628_GPIO_MODE_SPIS	2
-#define MT7628_GPIO_MODE_GPIO	0
+static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
+	FUNC("rsvd", 3, 35, 1),
+	FUNC("rsvd", 2, 35, 1),
+	FUNC("gpio", 1, 35, 1),
+	FUNC("wled_kn", 0, 35, 1),
+};
+
+static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
+	FUNC("rsvd", 3, 44, 1),
+	FUNC("rsvd", 2, 44, 1),
+	FUNC("gpio", 1, 44, 1),
+	FUNC("wled_an", 0, 44, 1),
+};
+
+#define MT7628_GPIO_MODE_MASK		0x3
+
+#define MT7628_GPIO_MODE_WLED_KN	48
+#define MT7628_GPIO_MODE_WLED_AN	32
+#define MT7628_GPIO_MODE_PWM1		30
+#define MT7628_GPIO_MODE_PWM0		28
+#define MT7628_GPIO_MODE_UART2		26
+#define MT7628_GPIO_MODE_UART1		24
+#define MT7628_GPIO_MODE_I2C		20
+#define MT7628_GPIO_MODE_REFCLK		18
+#define MT7628_GPIO_MODE_PERST		16
+#define MT7628_GPIO_MODE_WDT		14
+#define MT7628_GPIO_MODE_SPI		12
+#define MT7628_GPIO_MODE_SDMODE		10
+#define MT7628_GPIO_MODE_UART0		8
+#define MT7628_GPIO_MODE_I2S		6
+#define MT7628_GPIO_MODE_CS1		4
+#define MT7628_GPIO_MODE_SPIS		2
+#define MT7628_GPIO_MODE_GPIO		0
 
 static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
-	GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
+	GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
 	      1, MT7628_GPIO_MODE_PWM1),
-	GRP_G("pmw1", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
+	GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
 	      1, MT7628_GPIO_MODE_PWM0),
 	GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
 	      1, MT7628_GPIO_MODE_UART2),
@@ -233,6 +249,10 @@ static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
 	      1, MT7628_GPIO_MODE_SPIS),
 	GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK,
 	      1, MT7628_GPIO_MODE_GPIO),
+	GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+	      1, MT7628_GPIO_MODE_WLED_AN),
+	GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+	      1, MT7628_GPIO_MODE_WLED_KN),
 	{ 0 }
 };
 
@@ -439,7 +459,7 @@
 	ralink_clk_add("10000c00.uartlite", periph_rate);
 	ralink_clk_add("10180000.wmac", xtal_rate);
 
-	if (IS_ENABLED(CONFIG_USB) && is_mt76x8()) {
+	if (IS_ENABLED(CONFIG_USB) && !is_mt76x8()) {
 		/*
 		 * When the CPU goes into sleep mode, the BUS clock will be
 		 * too low for USB to function properly. Adjust the busses

@@ -109,5 +109,5 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
 	soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
 
 	rt2880_pinmux_data = rt2880_pinmux_data_act;
-	ralink_soc == RT2880_SOC;
+	ralink_soc = RT2880_SOC;
 }

@@ -304,9 +304,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
 	 *
 	 * For pHyp, we have to enable IO for log retrieval. Otherwise,
 	 * 0xFF's is always returned from PCI config space.
+	 *
+	 * When the @severity is EEH_LOG_PERM, the PE is going to be
+	 * removed. Prior to that, the drivers for devices included in
+	 * the PE will be closed. The drivers rely on working IO path
+	 * to bring the devices to quiet state. Otherwise, PCI traffic
+	 * from those devices after they are removed is like to cause
+	 * another unexpected EEH error.
 	 */
 	if (!(pe->type & EEH_PE_PHB)) {
-		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
+		    severity == EEH_LOG_PERM)
 			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
 
 		/*

@@ -15,7 +15,9 @@
 	BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
 	asm volatile(							\
 		"	lctlg	%1,%2,%0\n"				\
-		: : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+		:							\
+		: "Q" (*(addrtype *)(&array)), "i" (low), "i" (high)	\
+		: "memory");						\
 }
 
 #define __ctl_store(array, low, high) {					\

@@ -221,6 +221,9 @@ struct x86_emulate_ops {
 	void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
 			  u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
 	void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
+
+	unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
+	void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -290,7 +293,6 @@ struct x86_emulate_ctxt {
 
 	/* interruptibility state, as a result of execution of STI or MOV SS */
 	int interruptibility;
-	int emul_flags;
 
 	bool perm_ok; /* do not check permissions if true */
 	bool ud;	/* inject an #UD if host doesn't support insn */

@@ -2531,7 +2531,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	u64 smbase;
 	int ret;
 
-	if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
+	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
 		return emulate_ud(ctxt);
 
 	/*
@@ -2580,11 +2580,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 		return X86EMUL_UNHANDLEABLE;
 	}
 
-	if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
 		ctxt->ops->set_nmi_mask(ctxt, false);
 
-	ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
-	ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
+	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
 	return X86EMUL_CONTINUE;
 }
 
@@ -5296,6 +5296,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	const struct x86_emulate_ops *ops = ctxt->ops;
 	int rc = X86EMUL_CONTINUE;
 	int saved_dst_type = ctxt->dst.type;
+	unsigned emul_flags;
 
 	ctxt->mem_read.pos = 0;
 
@@ -5310,6 +5311,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 		goto done;
 	}
 
+	emul_flags = ctxt->ops->get_hflags(ctxt);
 	if (unlikely(ctxt->d &
 		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
 		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
@@ -5343,7 +5345,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
 		}
 
-		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
+		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
 						      X86_ICPT_PRE_EXCEPT);
 			if (rc != X86EMUL_CONTINUE)
@@ -5372,7 +5374,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 			goto done;
 		}
 
-		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
+		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
 						      X86_ICPT_POST_EXCEPT);
 			if (rc != X86EMUL_CONTINUE)
@@ -5426,7 +5428,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 
 special_insn:
 
-	if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
+	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
 					      X86_ICPT_POST_MEMACCESS);
 		if (rc != X86EMUL_CONTINUE)

@@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 			((u64)1 << edx.split.bit_width_fixed) - 1;
 	}
 
-	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
+	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
 		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
 	pmu->global_ctrl_mask = ~pmu->global_ctrl;
 

@@ -2264,7 +2264,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
 	if (!(vmcs12->exception_bitmap & (1u << nr)))
 		return 0;
 
-	nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
+	nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
 			  vmcs_read32(VM_EXIT_INTR_INFO),
 			  vmcs_readl(EXIT_QUALIFICATION));
 	return 1;

@@ -4844,6 +4844,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
 
 	if (var.unusable) {
 		memset(desc, 0, sizeof(*desc));
+		if (base3)
+			*base3 = 0;
 		return false;
 	}
 
@@ -4999,6 +5001,16 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
 	kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
 }
 
+static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
+{
+	return emul_to_vcpu(ctxt)->arch.hflags;
+}
+
+static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
+{
+	kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+}
+
 static const struct x86_emulate_ops emulate_ops = {
 	.read_gpr            = emulator_read_gpr,
 	.write_gpr           = emulator_write_gpr,
@@ -5038,6 +5050,8 @@ static const struct x86_emulate_ops emulate_ops = {
 	.intercept           = emulator_intercept,
 	.get_cpuid           = emulator_get_cpuid,
 	.set_nmi_mask        = emulator_set_nmi_mask,
+	.get_hflags          = emulator_get_hflags,
+	.set_hflags          = emulator_set_hflags,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -5090,7 +5104,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 	BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
 	BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
 	BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
-	ctxt->emul_flags = vcpu->arch.hflags;
 
 	init_decode_cache(ctxt);
 	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
@@ -5486,8 +5499,6 @@ restart:
 		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
-		if (vcpu->arch.hflags != ctxt->emul_flags)
-			kvm_set_hflags(vcpu, ctxt->emul_flags);
 		kvm_rip_write(vcpu, ctxt->eip);
 		if (r == EMULATE_DONE)
 			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
@@ -5974,7 +5985,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 
 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
 
-	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
+	return emulator_write_emulated(ctxt, rip, instruction, 3,
+		&ctxt->exception);
 }
 
 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)

@@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
 	 * We were not able to extract an address from the instruction,
 	 * probably because there was something invalid in it.
 	 */
-	if (info->si_addr == (void *)-1) {
+	if (info->si_addr == (void __user *)-1) {
 		err = -EINVAL;
 		goto err_out;
 	}
@@ -525,15 +525,7 @@ int mpx_handle_bd_fault(void)
 	if (!kernel_managing_mpx_tables(current->mm))
 		return -EINVAL;
 
-	if (do_mpx_bt_fault()) {
-		force_sig(SIGSEGV, current);
-		/*
-		 * The force_sig() is essentially "handling" this
-		 * exception, so we do not pass up the error
-		 * from do_mpx_bt_fault().
-		 */
-	}
-	return 0;
+	return do_mpx_bt_fault();
 }
 
 /*

@@ -134,8 +134,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 {
 	struct flush_tlb_info info;
 
-	if (end == 0)
-		end = start + PAGE_SIZE;
 	info.flush_mm = mm;
 	info.flush_start = start;
 	info.flush_end = end;
@@ -264,7 +262,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 	}
 
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
+		flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
 
 	preempt_enable();
 }

@@ -788,6 +788,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 {
 	struct gendisk *disk;
 	struct blkcg_gq *blkg;
+	struct module *owner;
 	unsigned int major, minor;
 	int key_len, part, ret;
 	char *body;
@@ -804,7 +805,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	if (!disk)
 		return -ENODEV;
 	if (part) {
+		owner = disk->fops->owner;
 		put_disk(disk);
+		module_put(owner);
 		return -ENODEV;
 	}
 
@@ -820,7 +823,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		ret = PTR_ERR(blkg);
 		rcu_read_unlock();
 		spin_unlock_irq(disk->queue->queue_lock);
+		owner = disk->fops->owner;
 		put_disk(disk);
+		module_put(owner);
 		/*
 		 * If queue was bypassing, we should retry.  Do so after a
 		 * short msleep().  It isn't strictly necessary but queue
@@ -851,9 +856,13 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
 {
+	struct module *owner;
+
 	spin_unlock_irq(ctx->disk->queue->queue_lock);
 	rcu_read_unlock();
+	owner = ctx->disk->fops->owner;
 	put_disk(ctx->disk);
+	module_put(owner);
 }
 EXPORT_SYMBOL_GPL(blkg_conf_finish);

@@ -1864,7 +1864,7 @@ static void config_work_handler(struct work_struct *work)
 {
 	struct ports_device *portdev;
 
-	portdev = container_of(work, struct ports_device, control_work);
+	portdev = container_of(work, struct ports_device, config_work);
 	if (!use_multiport(portdev)) {
 		struct virtio_device *vdev;
 		struct port *port;

@@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
 	rate = clk_get_rate(s3c_freq->hclk);
 	if (rate < 133 * 1000 * 1000) {
 		pr_err("cpufreq: HCLK not at 133MHz\n");
-		clk_put(s3c_freq->hclk);
 		ret = -EINVAL;
 		goto err_armclk;
 	}

@@ -124,6 +124,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		}
 		break;
 	}
+
+	if (!(*out_ring && (*out_ring)->adev)) {
+		DRM_ERROR("Ring %d is not initialized on IP %d\n",
+			  ring, ip_type);
+		return -EINVAL;
+	}
+
 	return 0;
 }

@@ -113,7 +113,11 @@ struct ast_private {
 	struct ttm_bo_kmap_obj cache_kmap;
 	int next_cursor;
 	bool support_wide_screen;
-	bool DisableP2A;
+	enum {
+		ast_use_p2a,
+		ast_use_dt,
+		ast_use_defaults
+	} config_mode;
 
 	enum ast_tx_chip tx_chip_type;
 	u8 dp501_maxclk;

@@ -62,13 +62,84 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
 	return ret;
 }
 
+static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
+{
+	struct device_node *np = dev->pdev->dev.of_node;
+	struct ast_private *ast = dev->dev_private;
+	uint32_t data, jregd0, jregd1;
+
+	/* Defaults */
+	ast->config_mode = ast_use_defaults;
+	*scu_rev = 0xffffffff;
+
+	/* Check if we have device-tree properties */
+	if (np && !of_property_read_u32(np, "aspeed,scu-revision-id",
+					scu_rev)) {
+		/* We do, disable P2A access */
+		ast->config_mode = ast_use_dt;
+		DRM_INFO("Using device-tree for configuration\n");
+		return;
+	}
+
+	/* Not all families have a P2A bridge */
+	if (dev->pdev->device != PCI_CHIP_AST2000)
+		return;
+
+	/*
+	 * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
+	 * is disabled. We force using P2A if VGA only mode bit
+	 * is set D[7]
+	 */
+	jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+	jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+	if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
+		/* Double check it's actually working */
+		data = ast_read32(ast, 0xf004);
+		if (data != 0xFFFFFFFF) {
+			/* P2A works, grab silicon revision */
+			ast->config_mode = ast_use_p2a;
+
+			DRM_INFO("Using P2A bridge for configuration\n");
+
+			/* Read SCU7c (silicon revision register) */
+			ast_write32(ast, 0xf004, 0x1e6e0000);
+			ast_write32(ast, 0xf000, 0x1);
+			*scu_rev = ast_read32(ast, 0x1207c);
+			return;
+		}
+	}
+
+	/* We have a P2A bridge but it's disabled */
+	DRM_INFO("P2A bridge disabled, using default configuration\n");
+}
+
 static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 {
 	struct ast_private *ast = dev->dev_private;
-	uint32_t data, jreg;
+	uint32_t jreg, scu_rev;
+
+	/*
+	 * If VGA isn't enabled, we need to enable now or subsequent
+	 * access to the scratch registers will fail. We also inform
+	 * our caller that it needs to POST the chip
+	 * (Assumption: VGA not enabled -> need to POST)
+	 */
+	if (!ast_is_vga_enabled(dev)) {
+		ast_enable_vga(dev);
+		DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
+		*need_post = true;
+	} else
+		*need_post = false;
+
+	/* Enable extended register access */
+	ast_enable_mmio(dev);
+	ast_open_key(ast);
+
+	/* Find out whether P2A works or whether to use device-tree */
+	ast_detect_config_mode(dev, &scu_rev);
 
 	/* Identify chipset */
 	if (dev->pdev->device == PCI_CHIP_AST1180) {
 		ast->chip = AST1100;
 		DRM_INFO("AST 1180 detected\n");
@@ -80,12 +151,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 		ast->chip = AST2300;
 		DRM_INFO("AST 2300 detected\n");
 	} else if (dev->pdev->revision >= 0x10) {
-		uint32_t data;
-		ast_write32(ast, 0xf004, 0x1e6e0000);
-		ast_write32(ast, 0xf000, 0x1);
-
-		data = ast_read32(ast, 0x1207c);
-		switch (data & 0x0300) {
+		switch (scu_rev & 0x0300) {
 		case 0x0200:
 			ast->chip = AST1100;
 			DRM_INFO("AST 1100 detected\n");
@@ -110,26 +176,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 		}
 	}
 
-	/*
-	 * If VGA isn't enabled, we need to enable now or subsequent
-	 * access to the scratch registers will fail. We also inform
-	 * our caller that it needs to POST the chip
-	 * (Assumption: VGA not enabled -> need to POST)
-	 */
-	if (!ast_is_vga_enabled(dev)) {
-		ast_enable_vga(dev);
-		ast_enable_mmio(dev);
-		DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
-		*need_post = true;
-	} else
-		*need_post = false;
-
-	/* Check P2A Access */
-	ast->DisableP2A = true;
-	data = ast_read32(ast, 0xf004);
-	if (data != 0xFFFFFFFF)
-		ast->DisableP2A = false;
-
 	/* Check if we support wide screen */
 	switch (ast->chip) {
 	case AST1180:
@@ -146,17 +192,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 			ast->support_wide_screen = true;
 		else {
 			ast->support_wide_screen = false;
-			if (ast->DisableP2A == false) {
-				/* Read SCU7c (silicon revision register) */
-				ast_write32(ast, 0xf004, 0x1e6e0000);
-				ast_write32(ast, 0xf000, 0x1);
-				data = ast_read32(ast, 0x1207c);
-				data &= 0x300;
-				if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
-					ast->support_wide_screen = true;
-				if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
-					ast->support_wide_screen = true;
-			}
+			if (ast->chip == AST2300 &&
+			    (scu_rev & 0x300) == 0x0) /* ast1300 */
+				ast->support_wide_screen = true;
+			if (ast->chip == AST2400 &&
+			    (scu_rev & 0x300) == 0x100) /* ast1400 */
+				ast->support_wide_screen = true;
 		}
 		break;
 	}
@@ -220,85 +261,102 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 
 static int ast_get_dram_info(struct drm_device *dev)
 {
+	struct device_node *np = dev->pdev->dev.of_node;
 	struct ast_private *ast = dev->dev_private;
-	uint32_t data, data2;
-	uint32_t denum, num, div, ref_pll;
+	uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
+	uint32_t denum, num, div, ref_pll, dsel;
 
-	if (ast->DisableP2A)
-	{
+	switch (ast->config_mode) {
+	case ast_use_dt:
+		/*
+		 * If some properties are missing, use reasonable
+		 * defaults for AST2400
+		 */
+		if (of_property_read_u32(np, "aspeed,mcr-configuration",
+					 &mcr_cfg))
+			mcr_cfg = 0x00000577;
+		if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
+					 &mcr_scu_mpll))
+			mcr_scu_mpll = 0x000050C0;
+		if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
+					 &mcr_scu_strap))
+			mcr_scu_strap = 0;
+		break;
+	case ast_use_p2a:
+		ast_write32(ast, 0xf004, 0x1e6e0000);
+		ast_write32(ast, 0xf000, 0x1);
+		mcr_cfg = ast_read32(ast, 0x10004);
+		mcr_scu_mpll = ast_read32(ast, 0x10120);
+		mcr_scu_strap = ast_read32(ast, 0x10170);
+		break;
+	case ast_use_defaults:
+	default:
 		ast->dram_bus_width = 16;
 		ast->dram_type = AST_DRAM_1Gx16;
 		ast->mclk = 396;
-	}
-	else
-	{
-		ast_write32(ast, 0xf004, 0x1e6e0000);
-		ast_write32(ast, 0xf000, 0x1);
-		data = ast_read32(ast, 0x10004);
-
-		if (data & 0x40)
-			ast->dram_bus_width = 16;
-		else
-			ast->dram_bus_width = 32;
-
-		if (ast->chip == AST2300 || ast->chip == AST2400) {
-			switch (data & 0x03) {
-			case 0:
-				ast->dram_type = AST_DRAM_512Mx16;
-				break;
-			default:
-			case 1:
-				ast->dram_type = AST_DRAM_1Gx16;
-				break;
-			case 2:
-				ast->dram_type = AST_DRAM_2Gx16;
-				break;
-			case 3:
-				ast->dram_type = AST_DRAM_4Gx16;
-				break;
-			}
-		} else {
-			switch (data & 0x0c) {
-			case 0:
-			case 4:
-				ast->dram_type = AST_DRAM_512Mx16;
-				break;
-			case 8:
-				if (data & 0x40)
-					ast->dram_type = AST_DRAM_1Gx16;
-				else
-					ast->dram_type = AST_DRAM_512Mx32;
-				break;
-			case 0xc:
-				ast->dram_type = AST_DRAM_1Gx32;
-				break;
-			}
-		}
-
-		data = ast_read32(ast, 0x10120);
-		data2 = ast_read32(ast, 0x10170);
-		if (data2 & 0x2000)
-			ref_pll = 14318;
-		else
-			ref_pll = 12000;
-
-		denum = data & 0x1f;
-		num = (data & 0x3fe0) >> 5;
-		data = (data & 0xc000) >> 14;
-		switch (data) {
-		case 3:
-			div = 0x4;
-			break;
-		case 2:
-		case 1:
-			div = 0x2;
-			break;
-		default:
-			div = 0x1;
-			break;
-		}
-		ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
-	}
+		return 0;
+	}
+
+	if (mcr_cfg & 0x40)
+		ast->dram_bus_width = 16;
+	else
+		ast->dram_bus_width = 32;
+
+	if (ast->chip == AST2300 || ast->chip == AST2400) {
+		switch (mcr_cfg & 0x03) {
+		case 0:
+			ast->dram_type = AST_DRAM_512Mx16;
+			break;
+		default:
+		case 1:
+			ast->dram_type = AST_DRAM_1Gx16;
+			break;
+		case 2:
+			ast->dram_type = AST_DRAM_2Gx16;
+			break;
+		case 3:
+			ast->dram_type = AST_DRAM_4Gx16;
+			break;
+		}
+	} else {
+		switch (mcr_cfg & 0x0c) {
+		case 0:
+		case 4:
+			ast->dram_type = AST_DRAM_512Mx16;
+			break;
+		case 8:
+			if (mcr_cfg & 0x40)
+				ast->dram_type = AST_DRAM_1Gx16;
+			else
+				ast->dram_type = AST_DRAM_512Mx32;
+			break;
+		case 0xc:
+			ast->dram_type = AST_DRAM_1Gx32;
+			break;
+		}
+	}
+
+	if (mcr_scu_strap & 0x2000)
+		ref_pll = 14318;
+	else
+		ref_pll = 12000;
+
+	denum = mcr_scu_mpll & 0x1f;
+	num = (mcr_scu_mpll & 0x3fe0) >> 5;
+	dsel = (mcr_scu_mpll & 0xc000) >> 14;
+	switch (dsel) {
+	case 3:
+		div = 0x4;
+		break;
+	case 2:
+	case 1:
+		div = 0x2;
+		break;
+	default:
+		div = 0x1;
+		break;
+	}
+	ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
 	return 0;
 }

@@ -375,17 +375,14 @@ void ast_post_gpu(struct drm_device *dev)
 	ast_enable_mmio(dev);
 	ast_set_def_ext_reg(dev);
 
-	if (ast->DisableP2A == false)
-	{
+	if (ast->config_mode == ast_use_p2a) {
 		if (ast->chip == AST2300 || ast->chip == AST2400)
 			ast_init_dram_2300(dev);
 		else
 			ast_init_dram_reg(dev);
 
 		ast_init_3rdtx(dev);
-	}
-	else
-	{
+	} else {
 		if (ast->tx_chip_type != AST_TX_NONE)
 			ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);	/* Enable DVO */
 	}

@@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
 	list_for_each_entry_safe(entry, next, &man->list, head)
 		vmw_cmdbuf_res_free(man, entry);
 
+	drm_ht_remove(&man->resources);
 	kfree(man);
 }

@@ -364,6 +364,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
 	if (ret)
 		return ret;
 
+	/*
+	 * The HID over I2C specification states that if a DEVICE needs time
+	 * after the PWR_ON request, it should utilise CLOCK stretching.
+	 * However, it has been observered that the Windows driver provides a
+	 * 1ms sleep between the PWR_ON and RESET requests and that some devices
+	 * rely on this.
+	 */
+	usleep_range(1000, 5000);
+
 	i2c_hid_dbg(ihid, "resetting...\n");
 
 	ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);

@@ -699,9 +699,9 @@ out_clear_state:
 
 out_unregister:
 	mmu_notifier_unregister(&pasid_state->mn, mm);
+	mmput(mm);
 
 out_free:
-	mmput(mm);
 	free_pasid_state(pasid_state);
 
 out:

@@ -1137,7 +1137,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
 		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
 			goto next;
 
-		level_pfn = pfn & level_mask(level - 1);
+		level_pfn = pfn & level_mask(level);
 		level_pte = phys_to_virt(dma_pte_addr(pte));
 
 		if (level > 2)

@@ -391,36 +391,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
 	device->dev = dev;
 
 	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
-	if (ret) {
-		kfree(device);
-		return ret;
-	}
+	if (ret)
+		goto err_free_device;
 
 	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
 rename:
 	if (!device->name) {
-		sysfs_remove_link(&dev->kobj, "iommu_group");
-		kfree(device);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_remove_link;
 	}
 
 	ret = sysfs_create_link_nowarn(group->devices_kobj,
 				       &dev->kobj, device->name);
 	if (ret) {
-		kfree(device->name);
 		if (ret == -EEXIST && i >= 0) {
 			/*
 			 * Account for the slim chance of collision
 			 * and append an instance to the name.
 			 */
+			kfree(device->name);
 			device->name = kasprintf(GFP_KERNEL, "%s.%d",
 						 kobject_name(&dev->kobj), i++);
 			goto rename;
 		}
-
-		sysfs_remove_link(&dev->kobj, "iommu_group");
-		kfree(device);
-		return ret;
+		goto err_free_name;
 	}
 
 	kobject_get(group->devices_kobj);
@@ -432,8 +426,10 @@ rename:
 	mutex_lock(&group->mutex);
 	list_add_tail(&device->list, &group->devices);
 	if (group->domain)
-		__iommu_attach_device(group->domain, dev);
+		ret = __iommu_attach_device(group->domain, dev);
 	mutex_unlock(&group->mutex);
+	if (ret)
+		goto err_put_group;
 
 	/* Notify any listeners about change to group. */
 	blocking_notifier_call_chain(&group->notifier,
@@ -444,6 +440,21 @@ rename:
 	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
 
 	return 0;
+
+err_put_group:
+	mutex_lock(&group->mutex);
+	list_del(&device->list);
+	mutex_unlock(&group->mutex);
+	dev->iommu_group = NULL;
+	kobject_put(group->devices_kobj);
+err_free_name:
+	kfree(device->name);
+err_remove_link:
+	sysfs_remove_link(&dev->kobj, "iommu_group");
+err_free_device:
+	kfree(device);
+	pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_group_add_device);

@@ -66,11 +66,13 @@ static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
 {
 	uint32_t buf;
 	size_t bytes_read;
+	int err;
 
-	if (mtd_read(master, offset, sizeof(buf), &bytes_read,
-		     (uint8_t *)&buf) < 0) {
-		pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
-		       offset);
+	err = mtd_read(master, offset, sizeof(buf), &bytes_read,
+		       (uint8_t *)&buf);
+	if (err && !mtd_is_bitflip(err)) {
+		pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+		       offset, err);
 		goto out_default;
 	}
 
@@ -95,6 +97,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 	int trx_part = -1;
 	int last_trx_part = -1;
 	int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
+	int err;
 
 	/*
 	 * Some really old flashes (like AT45DB*) had smaller erasesize-s, but
@@ -118,8 +121,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 	/* Parse block by block looking for magics */
 	for (offset = 0; offset <= master->size - blocksize;
 	     offset += blocksize) {
-		/* Nothing more in higher memory */
-		if (offset >= 0x2000000)
+		/* Nothing more in higher memory on BCM47XX (MIPS) */
+		if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
 			break;
 
 		if (curr_part >= BCM47XXPART_MAX_PARTS) {
@@ -128,10 +131,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 		}
 
 		/* Read beginning of the block */
-		if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
-			     &bytes_read, (uint8_t *)buf) < 0) {
-			pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
-			       offset);
+		err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+			       &bytes_read, (uint8_t *)buf);
+		if (err && !mtd_is_bitflip(err)) {
+			pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+			       offset, err);
 			continue;
 		}
 
@@ -252,10 +256,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 		}
 
 		/* Read middle of the block */
-		if (mtd_read(master, offset + 0x8000, 0x4,
-			     &bytes_read, (uint8_t *)buf) < 0) {
-			pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
-			       offset);
+		err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
+			       (uint8_t *)buf);
+		if (err && !mtd_is_bitflip(err)) {
+			pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+			       offset, err);
 			continue;
 		}
 
@@ -275,10 +280,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 		}
 
 		offset = master->size - possible_nvram_sizes[i];
-		if (mtd_read(master, offset, 0x4, &bytes_read,
-			     (uint8_t *)buf) < 0) {
-			pr_err("mtd_read error while reading at offset 0x%X!\n",
-			       offset);
+		err = mtd_read(master, offset, 0x4, &bytes_read,
+			       (uint8_t *)buf);
+		if (err && !mtd_is_bitflip(err)) {
+			pr_err("mtd_read error while reading (offset 0x%X): %d\n",
+			       offset, err);
 			continue;
 		}
 

@@ -2732,8 +2732,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
 
 	/* Flush Tx queues */
 	ret = xgbe_flush_tx_queues(pdata);
-	if (ret)
+	if (ret) {
+		netdev_err(pdata->netdev, "error flushing TX queues\n");
 		return ret;
+	}
 
 	/*
 	 * Initialize DMA related features

@@ -877,7 +877,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
 	DBGPR("-->xgbe_start\n");
 
-	hw_if->init(pdata);
+	ret = hw_if->init(pdata);
+	if (ret)
+		return ret;
 
 	ret = phy_if->phy_start(pdata);
 	if (ret)

@@ -255,15 +255,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 	while (ring->start != ring->end) {
 		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
 		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
-		u32 ctl1;
+		u32 ctl0, ctl1;
 		int len;
 
 		if (slot_idx == empty_slot)
 			break;
 
+		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
 		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
 		len = ctl1 & BGMAC_DESC_CTL1_LEN;
-		if (ctl1 & BGMAC_DESC_CTL0_SOF)
+		if (ctl0 & BGMAC_DESC_CTL0_SOF)
 			/* Unmap no longer used buffer */
 			dma_unmap_single(dma_dev, slot->dma_addr, len,
 					 DMA_TO_DEVICE);
@@ -469,6 +470,11 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 			len -= ETH_FCS_LEN;
 
 			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
+			if (unlikely(!skb)) {
+				bgmac_err(bgmac, "build_skb failed\n");
+				put_page(virt_to_head_page(buf));
+				break;
+			}
 			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
 				BGMAC_RX_BUF_OFFSET + len);
 			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
@@ -1302,7 +1308,8 @@ static int bgmac_open(struct net_device *net_dev)
 
 	phy_start(bgmac->phy_dev);
 
-	netif_carrier_on(net_dev);
+	netif_start_queue(net_dev);
+
 	return 0;
 }

@@ -1052,7 +1052,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
 
-	 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
+	 if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
 		status = -EPERM;
 
 	return status;

@@ -2939,7 +2939,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
 			size, GFAR_RXB_TRUESIZE);
 
 	/* try reuse page */
-	if (unlikely(page_count(page) != 1))
+	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
 		return false;
 
 	/* change offset to the other half */

@@ -1602,8 +1602,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	netdev->netdev_ops = &ibmveth_netdev_ops;
 	netdev->ethtool_ops = &netdev_ethtool_ops;
 	SET_NETDEV_DEV(netdev, &dev->dev);
-	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
-		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	netdev->hw_features = NETIF_F_SG;
+	if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
+		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			NETIF_F_RXCSUM;
+	}
 
 	netdev->features |= netdev->hw_features;
 

@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
 			DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
 			&lp->rx_dma_regs->dmasm);
 
-	korina_free_ring(dev);
-
 	napi_disable(&lp->napi);
 
+	korina_free_ring(dev);
+
 	if (korina_init(dev) < 0) {
 		printk(KERN_ERR "%s: cannot restart device\n", dev->name);
 		return;
@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
 	tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
 	writel(tmp, &lp->rx_dma_regs->dmasm);
 
-	korina_free_ring(dev);
-
 	napi_disable(&lp->napi);
 
 	cancel_work_sync(&lp->restart_task);
 
+	korina_free_ring(dev);
+
 	free_irq(lp->rx_irq, dev);
 	free_irq(lp->tx_irq, dev);
 	free_irq(lp->ovr_irq, dev);

@@ -2446,7 +2446,7 @@ static void mvneta_start_dev(struct mvneta_port *pp)
 	mvneta_port_enable(pp);
 
 	/* Enable polling on the port */
-	for_each_present_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
 
 		napi_enable(&port->napi);
@@ -2472,7 +2472,7 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
 
 	phy_stop(pp->phy_dev);
 
-	for_each_present_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
 
 		napi_disable(&port->napi);
@@ -2902,13 +2902,11 @@ err_cleanup_rxqs:
 static int mvneta_stop(struct net_device *dev)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
-	int cpu;
 
 	mvneta_stop_dev(pp);
 	mvneta_mdio_remove(pp);
 	unregister_cpu_notifier(&pp->cpu_notifier);
-	for_each_present_cpu(cpu)
-		smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
+	on_each_cpu(mvneta_percpu_disable, pp, true);
 	free_percpu_irq(dev->irq, pp->ports);
 	mvneta_cleanup_rxqs(pp);
 	mvneta_cleanup_txqs(pp);

@@ -542,8 +542,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 			break;
 
 		case MLX4_EVENT_TYPE_SRQ_LIMIT:
-			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
-				 __func__);
+			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
+				 __func__, be32_to_cpu(eqe->event.srq.srqn),
+				 eq->eqn);
 		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
 			if (mlx4_is_master(dev)) {
 				/* forward only to slave owning the SRQ */
@@ -558,15 +559,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 					     eq->eqn, eq->cons_index, ret);
 					break;
 				}
-				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
-					  __func__, slave,
-					  be32_to_cpu(eqe->event.srq.srqn),
-					  eqe->type, eqe->subtype);
+				if (eqe->type ==
+				    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+					mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+						  __func__, slave,
+						  be32_to_cpu(eqe->event.srq.srqn),
+						  eqe->type, eqe->subtype);
 
 				if (!ret && slave != dev->caps.function) {
-					mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
-						  __func__, eqe->type,
-						  eqe->subtype, slave);
+					if (eqe->type ==
+					    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+						mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+							  __func__, eqe->type,
+							  eqe->subtype, slave);
 					mlx4_slave_event(dev, slave, eqe);
 					break;
 				}

@@ -153,8 +153,9 @@ static struct mlx5_profile profile[] = {
 	},
 };
 
-#define FW_INIT_TIMEOUT_MILI 2000
-#define FW_INIT_WAIT_MS 2
+#define FW_INIT_TIMEOUT_MILI		2000
+#define FW_INIT_WAIT_MS			2
+#define FW_PRE_INIT_TIMEOUT_MILI	10000
 
 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
 {
@@ -934,6 +935,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	 */
 	dev->state = MLX5_DEVICE_STATE_UP;
 
+	/* wait for firmware to accept initialization segments configurations
+	 */
+	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
+	if (err) {
+		dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+			FW_PRE_INIT_TIMEOUT_MILI);
+		goto out;
+	}
+
 	err = mlx5_cmd_init(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
@@ -221,18 +221,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	int ring_size;
 	int i;
 
-	/* Free RX skb ringbuffer */
-	if (priv->rx_skb[q]) {
-		for (i = 0; i < priv->num_rx_ring[q]; i++)
-			dev_kfree_skb(priv->rx_skb[q][i]);
-	}
-	kfree(priv->rx_skb[q]);
-	priv->rx_skb[q] = NULL;
-
-	/* Free aligned TX buffers */
-	kfree(priv->tx_align[q]);
-	priv->tx_align[q] = NULL;
-
 	if (priv->rx_ring[q]) {
 		for (i = 0; i < priv->num_rx_ring[q]; i++) {
 			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
@@ -261,6 +249,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 		priv->tx_ring[q] = NULL;
 	}
 
+	/* Free RX skb ringbuffer */
+	if (priv->rx_skb[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++)
+			dev_kfree_skb(priv->rx_skb[q][i]);
+	}
+	kfree(priv->rx_skb[q]);
+	priv->rx_skb[q] = NULL;
+
+	/* Free aligned TX buffers */
+	kfree(priv->tx_align[q]);
+	priv->tx_align[q] = NULL;
+
 	/* Free TX skb ringbuffer.
 	 * SKBs are freed by ravb_tx_free() call above.
 	 */
@@ -2796,6 +2796,11 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
 	.offload_features = NETIF_F_IP_CSUM,
 	.mcdi_max_ver = -1,
+#ifdef CONFIG_SFC_SRIOV
+	.vswitching_probe = efx_port_dummy_op_int,
+	.vswitching_restore = efx_port_dummy_op_int,
+	.vswitching_remove = efx_port_dummy_op_void,
+#endif
 };
 
 const struct efx_nic_type falcon_b0_nic_type = {
@@ -2897,4 +2902,9 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
 	.mcdi_max_ver = -1,
 	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
+#ifdef CONFIG_SFC_SRIOV
+	.vswitching_probe = efx_port_dummy_op_int,
+	.vswitching_restore = efx_port_dummy_op_int,
+	.vswitching_remove = efx_port_dummy_op_void,
+#endif
 };
@@ -47,8 +47,16 @@ module_param(gso, bool, 0444);
  */
 DECLARE_EWMA(pkt_len, 1, 64)
 
+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
 /* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+				   1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
 
 #define VIRTNET_DRIVER_VERSION "1.0.0"
@@ -2240,7 +2240,7 @@ static void vxlan_cleanup(unsigned long arg)
 				= container_of(p, struct vxlan_fdb, hlist);
 			unsigned long timeout;
 
-			if (f->state & NUD_PERMANENT)
+			if (f->state & (NUD_PERMANENT | NUD_NOARP))
 				continue;
 
 			timeout = f->used + vxlan->cfg.age_interval * HZ;
@@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 	queue->rx.req_prod_pvt = req_prod;
 
 	/* Not enough requests? Try again later. */
-	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+	if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
 		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 		return;
 	}
@@ -807,6 +807,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
 			case 11:
 			case 7:
 			case 6:
+			case 1:
 				ideapad_input_report(priv, vpc_bit);
 				break;
 			case 5:
@@ -3563,12 +3563,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
 		} else {
 			buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
 			lpfc_els_free_data(phba, buf_ptr1);
+			elsiocb->context2 = NULL;
 		}
 	}
 
 	if (elsiocb->context3) {
 		buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
 		lpfc_els_free_bpl(phba, buf_ptr);
+		elsiocb->context3 = NULL;
 	}
 	lpfc_sli_release_iocbq(phba, elsiocb);
 	return 0;
@@ -5887,18 +5887,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
 
 free_vfi_bmask:
 	kfree(phba->sli4_hba.vfi_bmask);
+	phba->sli4_hba.vfi_bmask = NULL;
 free_xri_ids:
 	kfree(phba->sli4_hba.xri_ids);
+	phba->sli4_hba.xri_ids = NULL;
 free_xri_bmask:
 	kfree(phba->sli4_hba.xri_bmask);
+	phba->sli4_hba.xri_bmask = NULL;
 free_vpi_ids:
 	kfree(phba->vpi_ids);
+	phba->vpi_ids = NULL;
 free_vpi_bmask:
 	kfree(phba->vpi_bmask);
+	phba->vpi_bmask = NULL;
 free_rpi_ids:
 	kfree(phba->sli4_hba.rpi_ids);
+	phba->sli4_hba.rpi_ids = NULL;
 free_rpi_bmask:
 	kfree(phba->sli4_hba.rpi_bmask);
+	phba->sli4_hba.rpi_bmask = NULL;
 err_exit:
 	return rc;
 }
@@ -2466,6 +2466,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 	if (pkt->entry_status & RF_BUSY)
 		res = DID_BUS_BUSY << 16;
 
+	if (pkt->entry_type == NOTIFY_ACK_TYPE &&
+	    pkt->handle == QLA_TGT_SKIP_HANDLE)
+		return;
+
 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
 	if (sp) {
 		sp->done(ha, sp, res);
@@ -2865,7 +2865,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 
 	pkt->entry_type = NOTIFY_ACK_TYPE;
 	pkt->entry_count = 1;
-	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+	pkt->handle = QLA_TGT_SKIP_HANDLE;
 
 	nack = (struct nack_to_isp *)pkt;
 	nack->ox_id = ntfy->ox_id;
@@ -2566,7 +2566,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 		if (sdp->broken_fua) {
 			sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
 			sdkp->DPOFUA = 0;
-		} else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+		} else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
+			   !sdkp->device->use_16_for_rw) {
 			sd_first_printk(KERN_NOTICE, sdkp,
 					"Uses READ/WRITE(6), disabling FUA\n");
 			sdkp->DPOFUA = 0;
@@ -533,7 +533,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
 {
 	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
 	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+	unsigned long flags;
 	int req_size;
+	int ret;
 
 	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
@@ -561,8 +563,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
 		req_size = sizeof(cmd->req.cmd);
 	}
 
-	if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+	ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+	if (ret == -EIO) {
+		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+		spin_lock_irqsave(&req_vq->vq_lock, flags);
+		virtscsi_complete_cmd(vscsi, cmd);
+		spin_unlock_irqrestore(&req_vq->vq_lock, flags);
+	} else if (ret != 0) {
 		return SCSI_MLQUEUE_HOST_BUSY;
+	}
 	return 0;
 }
@@ -651,7 +651,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
 			buf = t->rx_buf;
 		t->rx_dma = dma_map_single(&spi->dev, buf,
 				t->len, DMA_FROM_DEVICE);
-		if (!t->rx_dma) {
+		if (dma_mapping_error(&spi->dev, !t->rx_dma)) {
 			ret = -EFAULT;
 			goto err_rx_map;
 		}
@@ -665,7 +665,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
 			buf = (void *)t->tx_buf;
 		t->tx_dma = dma_map_single(&spi->dev, buf,
 				t->len, DMA_TO_DEVICE);
-		if (!t->tx_dma) {
+		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
 			ret = -EFAULT;
 			goto err_tx_map;
 		}
@@ -1163,6 +1163,10 @@ static int tce_iommu_attach_group(void *iommu_data,
 	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
 			iommu_group_id(iommu_group), iommu_group); */
 	table_group = iommu_group_get_iommudata(iommu_group);
+	if (!table_group) {
+		ret = -ENODEV;
+		goto unlock_exit;
+	}
 
 	if (tce_groups_attached(container) && (!table_group->ops ||
 	    !table_group->ops->take_ownership ||
@@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
 	if (!wdt)
 		return -ENOMEM;
 
+	spin_lock_init(&wdt->lock);
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	wdt->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(wdt->base))
@@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	spin_lock_init(&wdt->lock);
 	platform_set_drvdata(pdev, wdt);
 	watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
 	bcm_kona_wdt_wdd.parent = &pdev->dev;
@@ -409,9 +409,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (map == SWIOTLB_MAP_ERROR)
 		return DMA_ERROR_CODE;
 
+	dev_addr = xen_phys_to_bus(map);
 	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
 					dev_addr, map & ~PAGE_MASK, size, dir, attrs);
-	dev_addr = xen_phys_to_bus(map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -567,13 +567,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 				sg_dma_len(sgl) = 0;
 				return 0;
 			}
+			dev_addr = xen_phys_to_bus(map);
 			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
 						dev_addr,
 						map & ~PAGE_MASK,
 						sg->length,
 						dir,
 						attrs);
-			sg->dma_address = xen_phys_to_bus(map);
+			sg->dma_address = dev_addr;
 		} else {
 			/* we are not interested in the dma_addr returned by
 			 * xen_dma_map_page, only in the potential cache flushes executed
@@ -2295,6 +2295,7 @@ static int elf_core_dump(struct coredump_params *cprm)
 				goto end_coredump;
 		}
 	}
+	dump_truncate(cprm);
 
 	if (!elf_core_write_extra_data(cprm))
 		goto end_coredump;
@@ -4397,8 +4397,19 @@ search_again:
 		if (found_type > min_type) {
 			del_item = 1;
 		} else {
-			if (item_end < new_size)
+			if (item_end < new_size) {
+				/*
+				 * With NO_HOLES mode, for the following mapping
+				 *
+				 * [0-4k][hole][8k-12k]
+				 *
+				 * if truncating isize down to 6k, it ends up
+				 * isize being 8k.
+				 */
+				if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+					last_size = new_size;
 				break;
+			}
 			if (found_key.offset >= new_size)
 				del_item = 1;
 			else
@@ -810,3 +810,21 @@ int dump_align(struct coredump_params *cprm, int align)
 	return mod ? dump_skip(cprm, align - mod) : 1;
 }
 EXPORT_SYMBOL(dump_align);
+
+/*
+ * Ensures that file size is big enough to contain the current file
+ * postion. This prevents gdb from complaining about a truncated file
+ * if the last "write" to the file was dump_skip.
+ */
+void dump_truncate(struct coredump_params *cprm)
+{
+	struct file *file = cprm->file;
+	loff_t offset;
+
+	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
+		offset = file->f_op->llseek(file, 0, SEEK_CUR);
+		if (i_size_read(file->f_mapping->host) < offset)
+			do_truncate(file->f_path.dentry, offset, 0, file);
+	}
+}
+EXPORT_SYMBOL(dump_truncate);
@@ -2188,8 +2188,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
 	if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
 		return 0;
 
-	/* even though OPEN succeeded, access is denied. Close the file */
-	nfs4_close_state(state, fmode);
 	return -EACCES;
 }
 
@@ -2070,13 +2070,13 @@ unlock:
 	spin_unlock(&o2hb_live_lock);
 }
 
-static ssize_t o2hb_heartbeat_group_threshold_show(struct config_item *item,
+static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item,
 		char *page)
 {
 	return sprintf(page, "%u\n", o2hb_dead_threshold);
 }
 
-static ssize_t o2hb_heartbeat_group_threshold_store(struct config_item *item,
+static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item,
 		const char *page, size_t count)
 {
 	unsigned long tmp;
@@ -2125,11 +2125,11 @@ static ssize_t o2hb_heartbeat_group_mode_store(struct config_item *item,
 
 }
 
-CONFIGFS_ATTR(o2hb_heartbeat_group_, threshold);
+CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold);
 CONFIGFS_ATTR(o2hb_heartbeat_group_, mode);
 
 static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
-	&o2hb_heartbeat_group_attr_threshold,
+	&o2hb_heartbeat_group_attr_dead_threshold,
 	&o2hb_heartbeat_group_attr_mode,
 	NULL,
 };
@@ -14,6 +14,7 @@ struct coredump_params;
 extern int dump_skip(struct coredump_params *cprm, size_t nr);
 extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
 extern int dump_align(struct coredump_params *cprm, int align);
+extern void dump_truncate(struct coredump_params *cprm);
 #ifdef CONFIG_COREDUMP
 extern void do_coredump(const siginfo_t *siginfo);
 #else
@@ -948,10 +948,6 @@ struct xfrm_dst {
 	struct flow_cache_object flo;
 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
 	int num_pols, num_xfrms;
-#ifdef CONFIG_XFRM_SUB_POLICY
-	struct flowi *origin;
-	struct xfrm_selector *partner;
-#endif
 	u32 xfrm_genid;
 	u32 policy_genid;
 	u32 route_mtu_cached;
@@ -967,12 +963,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
 	dst_release(xdst->route);
 	if (likely(xdst->u.dst.xfrm))
 		xfrm_state_put(xdst->u.dst.xfrm);
-#ifdef CONFIG_XFRM_SUB_POLICY
-	kfree(xdst->origin);
-	xdst->origin = NULL;
-	kfree(xdst->partner);
-	xdst->partner = NULL;
-#endif
 }
 #endif
 
@@ -167,7 +167,7 @@ void panic(const char *fmt, ...)
 	 * Delay timeout seconds before rebooting the machine.
 	 * We can't use the "normal" timers since we just panicked.
 	 */
-	pr_emerg("Rebooting in %d seconds..", panic_timeout);
+	pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
 
 	for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
 		touch_nmi_watchdog();
@@ -201,8 +201,9 @@ void calc_load_exit_idle(void)
 	struct rq *this_rq = this_rq();
 
 	/*
-	 * If we're still before the sample window, we're done.
+	 * If we're still before the pending sample window, we're done.
 	 */
+	this_rq->calc_load_update = calc_load_update;
 	if (time_before(jiffies, this_rq->calc_load_update))
 		return;
@@ -211,7 +212,6 @@ void calc_load_exit_idle(void)
 	 * accounted through the nohz accounting, so skip the entire deal and
 	 * sync up for the next window.
 	 */
-	this_rq->calc_load_update = calc_load_update;
 	if (time_before(jiffies, this_rq->calc_load_update + 10))
 		this_rq->calc_load_update += LOAD_FREQ;
 }
@@ -175,7 +175,7 @@ extern int no_unaligned_warning;
 #define SYSCTL_WRITES_WARN	 0
 #define SYSCTL_WRITES_STRICT	 1
 
-static int sysctl_writes_strict = SYSCTL_WRITES_WARN;
+static int sysctl_writes_strict = SYSCTL_WRITES_STRICT;
 
 static int proc_do_cad_pid(struct ctl_table *table, int write,
 		  void __user *buffer, size_t *lenp, loff_t *ppos);
@@ -452,11 +452,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 		: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
 
 	/*
-	 * For mappings greater than a page, we limit the stride (and
-	 * hence alignment) to a page size.
+	 * For mappings greater than or equal to a page, we limit the stride
+	 * (and hence alignment) to a page size.
 	 */
 	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	if (size > PAGE_SIZE)
+	if (size >= PAGE_SIZE)
 		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
 	else
 		stride = 1;
@@ -1363,8 +1363,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	if (unlikely(pmd_trans_migrating(*pmdp))) {
 		page = pmd_page(*pmdp);
+		if (!get_page_unless_zero(page))
+			goto out_unlock;
 		spin_unlock(ptl);
 		wait_on_page_locked(page);
+		put_page(page);
 		goto out;
 	}
@@ -1396,8 +1399,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	/* Migration could have started since the pmd_trans_migrating check */
 	if (!page_locked) {
+		if (!get_page_unless_zero(page))
+			goto out_unlock;
 		spin_unlock(ptl);
 		wait_on_page_locked(page);
+		put_page(page);
 		page_nid = -1;
 		goto out;
 	}
@@ -205,6 +205,8 @@ void swap_cgroup_swapoff(int type)
 			struct page *page = map[i];
 			if (page)
 				__free_page(page);
+			if (!(i % SWAP_CLUSTER_MAX))
+				cond_resched();
 		}
 		vfree(map);
 	}
@@ -278,7 +278,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 	return 0;
 
 out_free_newdev:
-	free_netdev(new_dev);
+	if (new_dev->reg_state == NETREG_UNINITIALIZED)
+		free_netdev(new_dev);
 	return err;
 }
 
@@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
 {
 	struct sk_buff *skb;
 
-	if (likely(in_interrupt()))
-		skb = alloc_skb(len + pfx, GFP_ATOMIC);
-	else
-		skb = alloc_skb(len + pfx, GFP_KERNEL);
-
+	skb = alloc_skb(len + pfx, GFP_ATOMIC);
 	if (unlikely(skb == NULL))
 		return NULL;
 
@@ -1246,8 +1246,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 	if (!new_ifalias)
 		return -ENOMEM;
 	dev->ifalias = new_ifalias;
+	memcpy(dev->ifalias, alias, len);
+	dev->ifalias[len] = 0;
 
-	strlcpy(dev->ifalias, alias, len+1);
 	return len;
 }
 
@@ -462,6 +462,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
 		spin_lock_bh(&dst_garbage.lock);
 		dst = dst_garbage.list;
 		dst_garbage.list = NULL;
+		/* The code in dst_ifdown places a hold on the loopback device.
+		 * If the gc entry processing is set to expire after a lengthy
+		 * interval, this hold can cause netdev_wait_allrefs() to hang
+		 * out and wait for a long time -- until the the loopback
+		 * interface is released. If we're really unlucky, it'll emit
+		 * pr_emerg messages to console too. Reset the interval here,
+		 * so dst cleanups occur in a more timely fashion.
+		 */
+		if (dst_garbage.timer_inc > DST_GC_INC) {
+			dst_garbage.timer_inc = DST_GC_INC;
+			dst_garbage.timer_expires = DST_GC_MIN;
+			mod_delayed_work(system_wq, &dst_gc_work,
+					 dst_garbage.timer_expires);
+		}
 		spin_unlock_bh(&dst_garbage.lock);
 
 		if (last)
@@ -897,6 +897,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
 	       + nla_total_size(1) /* IFLA_LINKMODE */
 	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
 	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
+	       + nla_total_size(4) /* IFLA_GROUP */
 	       + nla_total_size(ext_filter_mask
 			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
 	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1089,6 +1090,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	struct ifla_vf_mac vf_mac;
 	struct ifla_vf_info ivi;
 
+	memset(&ivi, 0, sizeof(ivi));
+
 	/* Not all SR-IOV capable drivers support the
 	 * spoofcheck and "RSS query enable" query. Preset to
 	 * -1 so the user space tool can detect that the driver
@@ -1097,7 +1100,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	ivi.spoofchk = -1;
 	ivi.rss_query_en = -1;
 	ivi.trusted = -1;
-	memset(ivi.mac, 0, sizeof(ivi.mac));
 	/* The default value for VF link state is "auto"
 	 * IFLA_VF_LINK_STATE_AUTO which equals zero
 	 */
@@ -1370,6 +1372,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
 	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
 	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
+	[IFLA_GROUP]		= { .type = NLA_U32 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
 	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
-static inline void dnrt_drop(struct dn_route *rt)
-{
-	dst_release(&rt->dst);
-	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
 static void dn_dst_check_expire(unsigned long dummy)
 {
 	int i;
@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
 		}
 		*rtp = rt->dst.dn_next;
 		rt->dst.dn_next = NULL;
-		dnrt_drop(rt);
+		dnrt_free(rt);
 		break;
 	}
 	spin_unlock_bh(&dn_rt_hash_table[i].lock);
@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
 			dst_use(&rth->dst, now);
 			spin_unlock_bh(&dn_rt_hash_table[hash].lock);
 
-			dnrt_drop(rt);
+			dst_free(&rt->dst);
 			*rp = rth;
 			return 0;
 		}
@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
 		for(; rt; rt = next) {
 			next = rcu_dereference_raw(rt->dst.dn_next);
 			RCU_INIT_POINTER(rt->dst.dn_next, NULL);
-			dst_free((struct dst_entry *)rt);
+			dnrt_free(rt);
 		}
 
 nothing_to_declare:
@@ -1187,7 +1181,7 @@ make_route:
 	if (dev_out->flags & IFF_LOOPBACK)
 		flags |= RTCF_LOCAL;
 
-	rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
+	rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
 	if (rt == NULL)
 		goto e_nobufs;
 
@@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
 
-	if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
+	if (skb->len < sizeof(*nlh) ||
+	    nlh->nlmsg_len < sizeof(*nlh) ||
+	    skb->len < nlh->nlmsg_len)
 		return;
 
 	if (!netlink_capable(skb, CAP_NET_ADMIN))
@@ -1006,10 +1006,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
 	/* Use already configured phy mode */
 	if (p->phy_interface == PHY_INTERFACE_MODE_NA)
 		p->phy_interface = p->phy->interface;
-	phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
-			   p->phy_interface);
-
-	return 0;
+	return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+				  p->phy_interface);
 }
 
 static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
@@ -1102,6 +1102,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
 	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
 	if (!pmc)
 		return;
+	spin_lock_init(&pmc->lock);
 	spin_lock_bh(&im->lock);
 	pmc->interface = im->interface;
 	in_dev_hold(in_dev);
@@ -2026,21 +2027,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
 static void ip_mc_clear_src(struct ip_mc_list *pmc)
 {
-	struct ip_sf_list *psf, *nextpsf;
+	struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
 
-	for (psf = pmc->tomb; psf; psf = nextpsf) {
-		nextpsf = psf->sf_next;
-		kfree(psf);
-	}
+	spin_lock_bh(&pmc->lock);
+	tomb = pmc->tomb;
 	pmc->tomb = NULL;
-	for (psf = pmc->sources; psf; psf = nextpsf) {
-		nextpsf = psf->sf_next;
-		kfree(psf);
-	}
+	sources = pmc->sources;
 	pmc->sources = NULL;
 	pmc->sfmode = MCAST_EXCLUDE;
 	pmc->sfcount[MCAST_INCLUDE] = 0;
 	pmc->sfcount[MCAST_EXCLUDE] = 1;
+	spin_unlock_bh(&pmc->lock);
+
+	for (psf = tomb; psf; psf = nextpsf) {
+		nextpsf = psf->sf_next;
+		kfree(psf);
+	}
+	for (psf = sources; psf; psf = nextpsf) {
+		nextpsf = psf->sf_next;
+		kfree(psf);
+	}
 }
 
 /* Join a multicast group
@@ -318,9 +318,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
 				   unsigned long delay)
 {
-	if (!delayed_work_pending(&ifp->dad_work))
-		in6_ifa_hold(ifp);
-	mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
+	in6_ifa_hold(ifp);
+	if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
+		in6_ifa_put(ifp);
 }
 
 static int snmp6_alloc_dev(struct inet6_dev *idev)
@@ -32,7 +32,6 @@ struct fib6_rule {
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 				   int flags, pol_lookup_t lookup)
 {
-	struct rt6_info *rt;
 	struct fib_lookup_arg arg = {
 		.lookup_ptr = lookup,
 		.flags = FIB_LOOKUP_NOREF,
@@ -41,21 +40,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 	fib_rules_lookup(net->ipv6.fib6_rules_ops,
 			 flowi6_to_flowi(fl6), flags, &arg);
 
-	rt = arg.result;
+	if (arg.result)
+		return arg.result;
 
-	if (!rt) {
-		dst_hold(&net->ipv6.ip6_null_entry->dst);
-		return &net->ipv6.ip6_null_entry->dst;
-	}
-
-	if (rt->rt6i_flags & RTF_REJECT &&
-	    rt->dst.error == -EAGAIN) {
-		ip6_rt_put(rt);
-		rt = net->ipv6.ip6_null_entry;
-		dst_hold(&rt->dst);
-	}
-
-	return &rt->dst;
+	dst_hold(&net->ipv6.ip6_null_entry->dst);
+	return &net->ipv6.ip6_null_entry->dst;
 }
 
 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -116,7 +105,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
 			flp6->saddr = saddr;
 		}
 		err = rt->dst.error;
-		goto out;
+		if (err != -EAGAIN)
+			goto out;
 	}
 again:
 	ip6_rt_put(rt);
@@ -290,8 +290,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 	struct rt6_info *rt;
 
 	rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
-	if (rt->rt6i_flags & RTF_REJECT &&
-	    rt->dst.error == -EAGAIN) {
+	if (rt->dst.error == -EAGAIN) {
 		ip6_rt_put(rt);
 		rt = net->ipv6.ip6_null_entry;
 		dst_hold(&rt->dst);
@@ -1005,8 +1005,10 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
 	}
 #endif
 	if (ipv6_addr_v4mapped(&fl6->saddr) &&
-	    !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr)))
-		return -EAFNOSUPPORT;
+	    !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
+		err = -EAFNOSUPPORT;
+		goto out_err_release;
+	}
 
 	return 0;
 
@@ -1135,6 +1135,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 			goto out;
 	}
 
+	err = -ENOBUFS;
 	key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
 	if (sa->sadb_sa_auth) {
 		int keysize = 0;
@@ -1146,8 +1147,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 		if (key)
 			keysize = (key->sadb_key_bits + 7) / 8;
 		x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
-		if (!x->aalg)
+		if (!x->aalg) {
+			err = -ENOMEM;
 			goto out;
+		}
 		strcpy(x->aalg->alg_name, a->name);
 		x->aalg->alg_key_len = 0;
 		if (key) {
@@ -1166,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 			goto out;
 		}
 		x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
-		if (!x->calg)
+		if (!x->calg) {
+			err = -ENOMEM;
 			goto out;
+		}
 		strcpy(x->calg->alg_name, a->name);
 		x->props.calgo = sa->sadb_sa_encrypt;
 	} else {
@@ -1181,8 +1186,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 		if (key)
 			keysize = (key->sadb_key_bits + 7) / 8;
 		x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
-		if (!x->ealg)
+		if (!x->ealg) {
+			err = -ENOMEM;
 			goto out;
+		}
 		strcpy(x->ealg->alg_name, a->name);
 		x->ealg->alg_key_len = 0;
 		if (key) {
@@ -1227,8 +1234,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 		struct xfrm_encap_tmpl *natt;
 
 		x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
-		if (!x->encap)
+		if (!x->encap) {
+			err = -ENOMEM;
 			goto out;
+		}
 
 		natt = x->encap;
 		n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
@@ -891,12 +891,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 		supp_ht = supp_ht || sband->ht_cap.ht_supported;
 		supp_vht = supp_vht || sband->vht_cap.vht_supported;
 
-		if (sband->ht_cap.ht_supported)
-			local->rx_chains =
-				max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
-				    local->rx_chains);
+		if (!sband->ht_cap.ht_supported)
+			continue;
 
+		/* TODO: consider VHT for RX chains, hopefully it's the same */
+		local->rx_chains =
+			max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
+			    local->rx_chains);
+
+		/* no need to mask, SM_PS_DISABLED has all bits set */
+		sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
+			             IEEE80211_HT_CAP_SM_PS_SHIFT;
 	}
 
 	/* if low-level driver supports AP, we also support VLAN */
@@ -45,6 +45,8 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/nf_conntrack_timestamp.h>
 #include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
 #ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_l4proto.h>
@@ -1798,6 +1800,8 @@ ctnetlink_create_conntrack(struct net *net,
 	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
 	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
 	nf_ct_labels_ext_add(ct);
+	nfct_seqadj_ext_add(ct);
+	nfct_synproxy_ext_add(ct);
 
 	/* we must add conntrack extensions before confirmation. */
 	ct->status |= IPS_CONFIRMED;
@@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
 	tcp_hdrlen = tcph->doff * 4;
 
-	if (len < tcp_hdrlen)
+	if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
 		return -1;
 
 	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -156,6 +156,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	if (len > tcp_hdrlen)
 		return 0;
 
+	/* tcph->doff has 4 bits, do not wrap it to 0 */
+	if (tcp_hdrlen >= 15 * 4)
+		return 0;
+
 	/*
 	 * MSS Option not found ?! add it..
 	 */
@@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
 	union sctp_addr *laddr = (union sctp_addr *)addr;
 	struct sctp_transport *transport;
 
-	if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
+	if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
 		return NULL;
 
 	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
@@ -997,7 +997,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	struct path path = { NULL, NULL };
 
 	err = -EINVAL;
-	if (sunaddr->sun_family != AF_UNIX)
+	if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
+	    sunaddr->sun_family != AF_UNIX)
 		goto out;
 
 	if (addr_len == sizeof(short)) {
@@ -1108,6 +1109,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
 	unsigned int hash;
 	int err;
 
+	err = -EINVAL;
+	if (alen < offsetofend(struct sockaddr, sa_family))
+		goto out;
+
 	if (addr->sa_family != AF_UNSPEC) {
 		err = unix_mkname(sunaddr, alen, &hash);
 		if (err < 0)
@@ -1776,43 +1776,6 @@ free_dst:
 	goto out;
 }
 
-#ifdef CONFIG_XFRM_SUB_POLICY
-static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
-{
-	if (!*target) {
-		*target = kmalloc(size, GFP_ATOMIC);
-		if (!*target)
-			return -ENOMEM;
-	}
-
-	memcpy(*target, src, size);
-	return 0;
-}
-#endif
-
-static int xfrm_dst_update_parent(struct dst_entry *dst,
-				  const struct xfrm_selector *sel)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
-	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
-				   sel, sizeof(*sel));
-#else
-	return 0;
-#endif
-}
-
-static int xfrm_dst_update_origin(struct dst_entry *dst,
-				  const struct flowi *fl)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
-	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
-#else
-	return 0;
-#endif
-}
-
 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
 				struct xfrm_policy **pols,
 				int *num_pols, int *num_xfrms)
@@ -1884,16 +1847,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
 	xdst = (struct xfrm_dst *)dst;
 	xdst->num_xfrms = err;
-	if (num_pols > 1)
-		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
-	else
-		err = xfrm_dst_update_origin(dst, fl);
-	if (unlikely(err)) {
-		dst_free(dst);
-		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
-		return ERR_PTR(err);
-	}
-
 	xdst->num_pols = num_pols;
 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
 	xdst->policy_genid = atomic_read(&pols[0]->genid);
@@ -294,6 +294,8 @@ struct hda_codec {
 
 #define list_for_each_codec(c, bus) \
 	list_for_each_entry(c, &(bus)->core.codec_list, core.list)
+#define list_for_each_codec_safe(c, n, bus)			\
+	list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list)
 
 /* snd_hda_codec_read/write optional flags */
 #define HDA_RW_NO_RESPONSE_FALLBACK	(1 << 0)
@@ -1128,8 +1128,12 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
 /* configure each codec instance */
 int azx_codec_configure(struct azx *chip)
 {
-	struct hda_codec *codec;
-	list_for_each_codec(codec, &chip->bus) {
+	struct hda_codec *codec, *next;
+
+	/* use _safe version here since snd_hda_codec_configure() deregisters
+	 * the device upon error and deletes itself from the bus list.
+	 */
+	list_for_each_codec_safe(codec, next, &chip->bus) {
 		snd_hda_codec_configure(codec);
 	}
 	return 0;
@@ -3190,6 +3190,7 @@ static int check_dyn_adc_switch(struct hda_codec *codec)
 					   spec->input_paths[i][nums]);
 				spec->input_paths[i][nums] =
 					spec->input_paths[i][n];
+				spec->input_paths[i][n] = 0;
 			}
 		}
 		nums++;
@@ -1460,16 +1460,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
 	Dwarf_Addr _addr = 0, baseaddr = 0;
 	const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
 	int baseline = 0, lineno = 0, ret = 0;
-	bool reloc = false;
 
-retry:
+	/* We always need to relocate the address for aranges */
+	if (debuginfo__get_text_offset(dbg, &baseaddr) == 0)
+		addr += baseaddr;
 	/* Find cu die */
 	if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
-		if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
-			addr += baseaddr;
-			reloc = true;
-			goto retry;
-		}
 		pr_warning("Failed to find debug information for address %lx\n",
 			   addr);
 		ret = -EINVAL;