This is the 4.4.105 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlosIJUACgkQONu9yGCS
 aT5/DQ/9Hkroo15HPErzBhLUIo3tdT8HyMBSA5gjy3x+fYBBkbj1HvyPWlwbZVgQ
 q2xQs7+Au8UlLLOV2UDz5C3K+hU4tM+uS3TC781ats5+VYWXAg6lXMshqwuXCDCX
 eZ1joC1cs+tHQ1AinXV341c6TX9HOQ1/kwZRxvS9q/n8PQd71uzIXxsp/AkqloMQ
 zTYz4FKdFPmKrHmMHR2X4MbRX85S5S57IJwmsfFEB+89PhfHGT5UkuClUuVrxV6m
 a34BcDDG2SZ9V58OEDS99SJIvVthyisv7iDwoe6qBO0cITI/DiZ15T53gR+6nhMd
 L+Y78wWohLFXxNoQFVXax/viFlsSju9j3f9NZeBb3olFlGuSiITsoJethU33Tzg0
 Lf1X9ELMekBDyE/e2nYN+wILz6WioWxHrJ9nqAkwxOAL8YQTjfiUnjv/TUO/AJZ3
 dmoJyrHOjkUTJpSQXx60ZwyvIyBv/oy8CWbr0lzVoq23OBrP26o/1C1Sbrm6bGMG
 6fUccAqFEG9IW7fxUM0ojFRGL/+eSkHIMeEQopfNs4XXIBV53kUD8en+Hf4n5sic
 NskK/UZSrC8KCAG4wmv1T+alDSrjhn+7xMdzRieEthhjFouwH3i8OROaVS+YX8+y
 oDZpCZxBMhMCFKl+CWbqj8+DdNJQgs5+7R6YhEjEmxpiPfIPQhQ=
 =XPGm
 -----END PGP SIGNATURE-----

Merge 4.4.105 into android-4.4

Changes in 4.4.105
	bcache: only permit to recovery read error when cache device is clean
	bcache: recover data from backing when data is clean
	uas: Always apply US_FL_NO_ATA_1X quirk to Seagate devices
	usb: quirks: Add no-lpm quirk for KY-688 USB 3.1 Type-C Hub
	serial: 8250_pci: Add Amazon PCI serial device ID
	s390/runtime instrumentation: simplify task exit handling
	USB: serial: option: add Quectel BG96 id
	ima: fix hash algorithm initialization
	s390/pci: do not require AIS facility
	selftests/x86/ldt_get: Add a few additional tests for limits
	serial: 8250_fintek: Fix rs485 disablement on invalid ioctl()
	spi: sh-msiof: Fix DMA transfer size check
	usb: phy: tahvo: fix error handling in tahvo_usb_probe()
	serial: 8250: Preserve DLD[7:4] for PORT_XR17V35X
	x86/entry: Use SYSCALL_DEFINE() macros for sys_modify_ldt()
	EDAC, sb_edac: Fix missing break in switch
	sysrq : fix Show Regs call trace on ARM
	perf test attr: Fix ignored test case result
	kprobes/x86: Disable preemption in ftrace-based jprobes
	net: systemport: Utilize skb_put_padto()
	net: systemport: Pad packet before inserting TSB
	ARM: OMAP1: DMA: Correct the number of logical channels
	vti6: fix device register to report IFLA_INFO_KIND
	net/appletalk: Fix kernel memory disclosure
	ravb: Remove Rx overflow log messages
	nfs: Don't take a reference on fl->fl_file for LOCK operation
	KVM: arm/arm64: Fix occasional warning from the timer work function
	NFSv4: Fix client recovery when server reboots multiple times
	drm/exynos/decon5433: set STANDALONE_UPDATE_F on output enablement
	net: sctp: fix array overrun read on sctp_timer_tbl
	tipc: fix cleanup at module unload
	dmaengine: pl330: fix double lock
	tcp: correct memory barrier usage in tcp_check_space()
	mm: avoid returning VM_FAULT_RETRY from ->page_mkwrite handlers
	xen-netfront: Improve error handling during initialization
	net: fec: fix multicast filtering hardware setup
	Revert "ocfs2: should wait dio before inode lock in ocfs2_setattr()"
	usb: hub: Cycle HUB power when initialization fails
	usb: xhci: fix panic in xhci_free_virt_devices_depth_first
	usb: Add USB 3.1 Precision time measurement capability descriptor support
	usb: ch9: Add size macro for SSP dev cap descriptor
	USB: core: Add type-specific length check of BOS descriptors
	USB: Increase usbfs transfer limit
	USB: devio: Prevent integer overflow in proc_do_submiturb()
	USB: usbfs: Filter flags passed in from user space
	usb: host: fix incorrect updating of offset
	xen-netfront: avoid crashing on resume after a failure in talk_to_netback()
	Linux 4.4.105

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2017-12-10 14:42:29 +01:00
commit 8a5396242e
50 changed files with 269 additions and 181 deletions

View file

@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 104
SUBLEVEL = 105
EXTRAVERSION =
NAME = Blurry Fish Butt

View file

@ -31,7 +31,6 @@
#include "soc.h"
#define OMAP1_DMA_BASE (0xfffed800)
#define OMAP1_LOGICAL_DMA_CH_COUNT 17
static u32 enable_1510_mode;
@ -311,8 +310,6 @@ static int __init omap1_system_dma_init(void)
goto exit_iounmap;
}
d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
/* Valid attributes for omap1 plus processors */
if (cpu_is_omap15xx())
d->dev_caps = ENABLE_1510_MODE;
@ -329,13 +326,14 @@ static int __init omap1_system_dma_init(void)
d->dev_caps |= CLEAR_CSR_ON_READ;
d->dev_caps |= IS_WORD_16;
if (cpu_is_omap15xx())
d->chan_count = 9;
else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
if (!(d->dev_caps & ENABLE_1510_MODE))
d->chan_count = 16;
/* available logical channels */
if (cpu_is_omap15xx()) {
d->lch_count = 9;
} else {
if (d->dev_caps & ENABLE_1510_MODE)
d->lch_count = 9;
else
d->chan_count = 9;
d->lch_count = 16;
}
p = dma_plat_info;

View file

@ -81,6 +81,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
int zpci_load(u64 *data, u64 req, u64 offset);
int zpci_store(u64 data, u64 req, u64 offset);
int zpci_store_block(const u64 *data, u64 req, u64 offset);
void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
#endif

View file

@ -85,6 +85,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
load_runtime_instr_cb(&runtime_instr_empty_cb);
}
void exit_thread_runtime_instr(void);
struct task_struct;
void runtime_instr_release(struct task_struct *tsk);
#endif /* _RUNTIME_INSTR_H */

View file

@ -72,7 +72,6 @@ extern void kernel_thread_starter(void);
*/
void exit_thread(void)
{
exit_thread_runtime_instr();
}
void flush_thread(void)
@ -87,6 +86,7 @@ void arch_release_task_struct(struct task_struct *tsk)
{
/* Free either the floating-point or the vector register save area */
kfree(tsk->thread.fpu.regs);
runtime_instr_release(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)

View file

@ -18,11 +18,24 @@
/* empty control block to disable RI by loading it */
struct runtime_instr_cb runtime_instr_empty_cb;
void runtime_instr_release(struct task_struct *tsk)
{
kfree(tsk->thread.ri_cb);
}
static void disable_runtime_instr(void)
{
struct pt_regs *regs = task_pt_regs(current);
struct task_struct *task = current;
struct pt_regs *regs;
if (!task->thread.ri_cb)
return;
regs = task_pt_regs(task);
preempt_disable();
load_runtime_instr_cb(&runtime_instr_empty_cb);
kfree(task->thread.ri_cb);
task->thread.ri_cb = NULL;
preempt_enable();
/*
* Make sure the RI bit is deleted from the PSW. If the user did not
@ -43,19 +56,6 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
cb->valid = 1;
}
void exit_thread_runtime_instr(void)
{
struct task_struct *task = current;
preempt_disable();
if (!task->thread.ri_cb)
return;
disable_runtime_instr();
kfree(task->thread.ri_cb);
task->thread.ri_cb = NULL;
preempt_enable();
}
SYSCALL_DEFINE1(s390_runtime_instr, int, command)
{
struct runtime_instr_cb *cb;
@ -64,7 +64,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
return -EOPNOTSUPP;
if (command == S390_RUNTIME_INSTR_STOP) {
exit_thread_runtime_instr();
disable_runtime_instr();
return 0;
}

View file

@ -359,7 +359,8 @@ static void zpci_irq_handler(struct airq_struct *airq)
/* End of second scan with interrupts on. */
break;
/* First scan complete, reenable interrupts. */
zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
break;
si = 0;
continue;
}
@ -921,7 +922,7 @@ static int __init pci_base_init(void)
if (!s390_pci_probe)
return 0;
if (!test_facility(69) || !test_facility(71) || !test_facility(72))
if (!test_facility(69) || !test_facility(71))
return 0;
rc = zpci_debug_init();

View file

@ -7,6 +7,7 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
#include <asm/processor.h>
@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
}
/* Set Interruption Controls */
void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
{
if (!test_facility(72))
return -EIO;
asm volatile (
" .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
return 0;
}
/* PCI Load */

View file

@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
asmlinkage long sys_iopl(unsigned int);
/* kernel/ldt.c */
asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
asmlinkage long sys_modify_ldt(int, void __user *, unsigned long);
/* kernel/signal.c */
asmlinkage long sys_rt_sigreturn(void);

View file

@ -26,7 +26,7 @@
#include "common.h"
static nokprobe_inline
int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb, unsigned long orig_ip)
{
/*
@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
__this_cpu_write(current_kprobe, NULL);
if (orig_ip)
regs->ip = orig_ip;
return 1;
}
int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
if (kprobe_ftrace(p))
return __skip_singlestep(p, regs, kcb, 0);
else
return 0;
if (kprobe_ftrace(p)) {
__skip_singlestep(p, regs, kcb, 0);
preempt_enable_no_resched();
return 1;
}
return 0;
}
NOKPROBE_SYMBOL(skip_singlestep);
/* Ftrace callback handler for kprobes */
/* Ftrace callback handler for kprobes -- called under preempt disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
{
@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
regs->ip = ip + sizeof(kprobe_opcode_t);
/* To emulate trap based kprobes, preempt_disable here */
preempt_disable();
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (!p->pre_handler || !p->pre_handler(p, regs))
if (!p->pre_handler || !p->pre_handler(p, regs)) {
__skip_singlestep(p, regs, kcb, orig_ip);
preempt_enable_no_resched();
}
/*
* If pre_handler returns !0, it sets regs->ip and
* resets current kprobe.
* resets current kprobe, and keep preempt count +1.
*/
}
end:

View file

@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
@ -271,8 +272,8 @@ out:
return error;
}
asmlinkage int sys_modify_ldt(int func, void __user *ptr,
unsigned long bytecount)
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
unsigned long , bytecount)
{
int ret = -ENOSYS;
@ -290,5 +291,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
ret = write_ldt(ptr, bytecount, 0);
break;
}
return ret;
/*
* The SYSCALL_DEFINE() macros give us an 'unsigned long'
* return type, but the ABI for sys_modify_ldt() expects
* 'int'. This cast gives us an int-sized value in %rax
* for the return code. The 'unsigned' is necessary so
* the compiler does not try to sign-extend the negative
* return codes into the high half of the register when
* taking the value from int->long.
*/
return (unsigned int)ret;
}
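The comment above is the whole story behind the (unsigned int) cast: SYSCALL_DEFINE() gives the syscall a 'long' return type, but the historical modify_ldt() ABI returns an 'int'. A stand-alone user-space sketch of the difference on a 64-bit build (not part of the patch; names and values are illustrative):

#include <stdio.h>

int main(void)
{
    int ret = -38;      /* an int-sized error code, e.g. -ENOSYS */

    /* Widening the int directly sign-extends into the high 32 bits. */
    unsigned long sign_extended = (long)ret;

    /* Casting through (unsigned int) keeps only the low 32 bits, which is
     * the int-sized value the old ABI exposed in %eax. */
    unsigned long int_sized = (unsigned int)ret;

    printf("sign-extended: %#lx\n", sign_extended); /* 0xffffffffffffffda */
    printf("int-sized:     %#lx\n", int_sized);     /* 0xffffffda */
    return 0;
}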

View file

@ -6,6 +6,7 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <os.h>
@ -369,7 +370,9 @@ void free_ldt(struct mm_context *mm)
mm->arch.ldt.entry_count = 0;
}
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
unsigned long , bytecount)
{
return do_modify_ldt_skas(func, ptr, bytecount);
/* See non-um modify_ldt() for why we do this cast */
return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
}

View file

@ -1657,7 +1657,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
struct pl330_thread *thrd = NULL;
unsigned long flags;
int chans, i;
if (pl330->state == DYING)
@ -1665,8 +1664,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
chans = pl330->pcfg.num_chan;
spin_lock_irqsave(&pl330->lock, flags);
for (i = 0; i < chans; i++) {
thrd = &pl330->channels[i];
if ((thrd->free) && (!_manager_ns(thrd) ||
@ -1684,8 +1681,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
thrd = NULL;
}
spin_unlock_irqrestore(&pl330->lock, flags);
return thrd;
}
@ -1703,7 +1698,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
static void pl330_release_channel(struct pl330_thread *thrd)
{
struct pl330_dmac *pl330;
unsigned long flags;
if (!thrd || thrd->free)
return;
@ -1715,10 +1709,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
pl330 = thrd->dmac;
spin_lock_irqsave(&pl330->lock, flags);
_free_event(thrd, thrd->ev);
thrd->free = true;
spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Initialize the structure for PL330 configuration, that can be used
@ -2085,20 +2077,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
struct pl330_dmac *pl330 = pch->dmac;
unsigned long flags;
spin_lock_irqsave(&pch->lock, flags);
spin_lock_irqsave(&pl330->lock, flags);
dma_cookie_init(chan);
pch->cyclic = false;
pch->thread = pl330_request_channel(pl330);
if (!pch->thread) {
spin_unlock_irqrestore(&pch->lock, flags);
spin_unlock_irqrestore(&pl330->lock, flags);
return -ENOMEM;
}
tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
spin_unlock_irqrestore(&pch->lock, flags);
spin_unlock_irqrestore(&pl330->lock, flags);
return 1;
}
@ -2201,12 +2193,13 @@ static int pl330_pause(struct dma_chan *chan)
static void pl330_free_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
struct pl330_dmac *pl330 = pch->dmac;
unsigned long flags;
tasklet_kill(&pch->task);
pm_runtime_get_sync(pch->dmac->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
spin_lock_irqsave(&pl330->lock, flags);
pl330_release_channel(pch->thread);
pch->thread = NULL;
@ -2214,7 +2207,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
if (pch->cyclic)
list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
spin_unlock_irqrestore(&pl330->lock, flags);
pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
}

View file

@ -1773,6 +1773,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
pvt->pci_ta = pdev;
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
pvt->pci_ras = pdev;
break;

View file

@ -180,6 +180,8 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
/* enable output and display signal */
decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID | VIDCON0_ENVID_F, ~0);
decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
}
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,

View file

@ -708,7 +708,14 @@ static void cached_dev_read_error(struct closure *cl)
struct search *s = container_of(cl, struct search, cl);
struct bio *bio = &s->bio.bio;
if (s->recoverable) {
/*
* If read request hit dirty data (s->read_dirty_data is true),
* then recovery a failed read request from cached device may
* get a stale data back. So read failure recovery is only
* permitted when read request hit clean data in cache device,
* or when cache read race happened.
*/
if (s->recoverable && !s->read_dirty_data) {
/* Retry from the backing device: */
trace_bcache_read_retry(s->orig_bio);
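For illustration, the new retry condition above boils down to the following stand-alone sketch (struct and function names are made up for the example, they are not bcache's):

#include <stdbool.h>
#include <stdio.h>

/* A failed cache-device read is retried from the backing device only when
 * the request is recoverable AND it did not hit dirty data in the cache:
 * dirty cache blocks may be newer than the backing copy, so retrying there
 * could silently return stale data. */
struct read_state {
    bool recoverable;
    bool read_dirty_data;
};

static bool retry_from_backing(const struct read_state *s)
{
    return s->recoverable && !s->read_dirty_data;
}

int main(void)
{
    struct read_state clean = { .recoverable = true, .read_dirty_data = false };
    struct read_state dirty = { .recoverable = true, .read_dirty_data = true };

    printf("clean cache hit: retry=%d\n", retry_from_backing(&clean)); /* 1 */
    printf("dirty cache hit: retry=%d\n", retry_from_backing(&dirty)); /* 0 */
    return 0;
}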

View file

@ -191,7 +191,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
*/
static int ipddp_create(struct ipddp_route *new_rt)
{
struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (rt == NULL)
return -ENOMEM;

View file

@ -1045,6 +1045,18 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
goto out;
}
/* The Ethernet switch we are interfaced with needs packets to be at
* least 64 bytes (including FCS) otherwise they will be discarded when
* they enter the switch port logic. When Broadcom tags are enabled, we
* need to make sure that packets are at least 68 bytes
* (including FCS and tag) because the length verification is done after
* the Broadcom tag is stripped off the ingress packet.
*/
if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
ret = NETDEV_TX_OK;
goto out;
}
/* Insert TSB and checksum infos */
if (priv->tsb_en) {
skb = bcm_sysport_insert_tsb(skb, dev);
@ -1054,20 +1066,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
}
}
/* The Ethernet switch we are interfaced with needs packets to be at
* least 64 bytes (including FCS) otherwise they will be discarded when
* they enter the switch port logic. When Broadcom tags are enabled, we
* need to make sure that packets are at least 68 bytes
* (including FCS and tag) because the length verification is done after
* the Broadcom tag is stripped off the ingress packet.
*/
if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
ret = NETDEV_TX_OK;
goto out;
}
skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
skb_len = skb->len;
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
if (dma_mapping_error(kdev, mapping)) {
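The reordering done by the two systemport patches can be summarized with a stand-alone sketch: pad the frame to the switch's minimum size first, then insert the transmit status block (TSB), so the DMA mapping can simply use skb->len. The constants below are the usual values for this hardware but should be read as illustrative:

#include <stdio.h>

#define ETH_ZLEN           60  /* minimum Ethernet frame length, without FCS */
#define ENET_BRCM_TAG_LEN   4  /* Broadcom tag stripped at switch ingress */
#define TSB_LEN             8  /* transmit status block prepended by the MAC */

int main(void)
{
    int len = 42;   /* a short frame handed down by the stack */

    /* 1. pad first (what skb_put_padto() does), so the padding is part of
     *    skb->len and covers the real payload */
    if (len < ETH_ZLEN + ENET_BRCM_TAG_LEN)
        len = ETH_ZLEN + ENET_BRCM_TAG_LEN;

    /* 2. only then prepend the TSB */
    len += TSB_LEN;

    /* 3. map exactly skb->len; no special-casing of short frames needed */
    printf("DMA-mapped length: %d bytes\n", len);   /* 72 */
    return 0;
}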

View file

@ -2968,6 +2968,7 @@ static void set_multicast_list(struct net_device *ndev)
struct netdev_hw_addr *ha;
unsigned int i, bit, data, crc, tmp;
unsigned char hash;
unsigned int hash_high = 0, hash_low = 0;
if (ndev->flags & IFF_PROMISC) {
tmp = readl(fep->hwp + FEC_R_CNTRL);
@ -2990,11 +2991,7 @@ static void set_multicast_list(struct net_device *ndev)
return;
}
/* Clear filter and add the addresses in hash register
*/
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
/* Add the addresses in hash register */
netdev_for_each_mc_addr(ha, ndev) {
/* calculate crc32 value of mac address */
crc = 0xffffffff;
@ -3012,16 +3009,14 @@ static void set_multicast_list(struct net_device *ndev)
*/
hash = (crc >> (32 - HASH_BITS)) & 0x3f;
if (hash > 31) {
tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
tmp |= 1 << (hash - 32);
writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
} else {
tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
tmp |= 1 << hash;
writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}
if (hash > 31)
hash_high |= 1 << (hash - 32);
else
hash_low |= 1 << hash;
}
writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}
/* Set a MAC change in hardware. */
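A stand-alone user-space sketch of the per-address hash computation, now accumulating both halves of the 64-bit group filter and writing each register once at the end. The CRC loop is an equivalent rewrite of the driver's bitwise CRC-32; the MAC addresses are just examples:

#include <stdint.h>
#include <stdio.h>

#define HASH_BITS 6 /* number of hash index bits, as in the driver */

/* Bitwise CRC-32 (reflected, polynomial 0xedb88320) over a 6-byte MAC. */
static uint32_t mac_crc32(const uint8_t *mac)
{
    uint32_t crc = 0xffffffff;

    for (int i = 0; i < 6; i++) {
        crc ^= mac[i];
        for (int bit = 0; bit < 8; bit++)
            crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
    }
    return crc;
}

int main(void)
{
    const uint8_t macs[][6] = {
        { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },    /* example multicast MACs */
        { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 },
    };
    uint32_t hash_high = 0, hash_low = 0;

    for (size_t i = 0; i < sizeof(macs) / sizeof(macs[0]); i++) {
        uint32_t hash = (mac_crc32(macs[i]) >> (32 - HASH_BITS)) & 0x3f;

        if (hash > 31)
            hash_high |= 1u << (hash - 32);
        else
            hash_low |= 1u << hash;
    }

    /* Only now would FEC_GRP_HASH_TABLE_HIGH/LOW be written, once each. */
    printf("hash_high = %#010x\nhash_low  = %#010x\n", hash_high, hash_low);
    return 0;
}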

View file

@ -831,14 +831,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Receive error message handling */
priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors;
netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
}
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
}
out:
return budget - quota;
}

View file

@ -1840,27 +1840,19 @@ static int talk_to_netback(struct xenbus_device *dev,
xennet_destroy_queues(info);
err = xennet_create_queues(info, &num_queues);
if (err < 0)
goto destroy_ring;
if (err < 0) {
xenbus_dev_fatal(dev, err, "creating queues");
kfree(info->queues);
info->queues = NULL;
goto out;
}
/* Create shared ring, alloc event channel -- for each queue */
for (i = 0; i < num_queues; ++i) {
queue = &info->queues[i];
err = setup_netfront(dev, queue, feature_split_evtchn);
if (err) {
/* setup_netfront() will tidy up the current
* queue on error, but we need to clean up
* those already allocated.
*/
if (i > 0) {
rtnl_lock();
netif_set_real_num_tx_queues(info->netdev, i);
rtnl_unlock();
goto destroy_ring;
} else {
goto out;
}
}
if (err)
goto destroy_ring;
}
again:
@ -1950,9 +1942,9 @@ abort_transaction_no_dev_fatal:
xenbus_transaction_end(xbt, 1);
destroy_ring:
xennet_disconnect_backend(info);
kfree(info->queues);
info->queues = NULL;
xennet_destroy_queues(info);
out:
device_unregister(&dev->dev);
return err;
}

View file

@ -863,7 +863,7 @@ static int sh_msiof_transfer_one(struct spi_master *master,
break;
copy32 = copy_bswap32;
} else if (bits <= 16) {
if (l & 1)
if (l & 3)
break;
copy32 = copy_wswap32;
} else {

View file

@ -402,15 +402,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
result = VM_FAULT_LOCKED;
break;
case -ENODATA:
case -EAGAIN:
case -EFAULT:
result = VM_FAULT_NOPAGE;
break;
case -ENOMEM:
result = VM_FAULT_OOM;
break;
case -EAGAIN:
result = VM_FAULT_RETRY;
break;
default:
result = VM_FAULT_SIGBUS;
break;

View file

@ -117,7 +117,7 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) ==
(!!(rs485->flags & SER_RS485_RTS_AFTER_SEND)))
rs485->flags &= SER_RS485_ENABLED;
rs485->flags &= ~SER_RS485_ENABLED;
else
config |= RS485_URA;

View file

@ -5797,6 +5797,9 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
{ PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
/* Amazon PCI serial device */
{ PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
/*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL

View file

@ -2223,8 +2223,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
serial_dl_write(up, quot);
/* XR17V35x UARTs have an extra fractional divisor register (DLD) */
if (up->port.type == PORT_XR17V35X)
if (up->port.type == PORT_XR17V35X) {
/* Preserve bits not related to baudrate; DLD[7:4]. */
quot_frac |= serial_port_in(port, 0x2) & 0xf0;
serial_port_out(port, 0x2, quot_frac);
}
}
static unsigned int

View file

@ -237,8 +237,10 @@ static void sysrq_handle_showallcpus(int key)
* architecture has no support for it:
*/
if (!trigger_all_cpu_backtrace()) {
struct pt_regs *regs = get_irq_regs();
struct pt_regs *regs = NULL;
if (in_irq())
regs = get_irq_regs();
if (regs) {
pr_info("CPU%d:\n", smp_processor_id());
show_regs(regs);
@ -257,7 +259,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
static void sysrq_handle_showregs(int key)
{
struct pt_regs *regs = get_irq_regs();
struct pt_regs *regs = NULL;
if (in_irq())
regs = get_irq_regs();
if (regs)
show_regs(regs);
perf_event_print_debug();

View file

@ -871,14 +871,25 @@ void usb_release_bos_descriptor(struct usb_device *dev)
}
}
static const __u8 bos_desc_len[256] = {
[USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
[USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE,
[USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE,
[USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1),
[CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE,
[USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE,
};
/* Get BOS descriptor set */
int usb_get_bos_descriptor(struct usb_device *dev)
{
struct device *ddev = &dev->dev;
struct usb_bos_descriptor *bos;
struct usb_dev_cap_header *cap;
struct usb_ssp_cap_descriptor *ssp_cap;
unsigned char *buffer;
int length, total_len, num, i;
int length, total_len, num, i, ssac;
__u8 cap_type;
int ret;
bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
@ -931,7 +942,13 @@ int usb_get_bos_descriptor(struct usb_device *dev)
dev->bos->desc->bNumDeviceCaps = i;
break;
}
cap_type = cap->bDevCapabilityType;
length = cap->bLength;
if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
dev->bos->desc->bNumDeviceCaps = i;
break;
}
total_len -= length;
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
@ -939,7 +956,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
continue;
}
switch (cap->bDevCapabilityType) {
switch (cap_type) {
case USB_CAP_TYPE_WIRELESS_USB:
/* Wireless USB cap descriptor is handled by wusb */
break;
@ -952,13 +969,19 @@ int usb_get_bos_descriptor(struct usb_device *dev)
(struct usb_ss_cap_descriptor *)buffer;
break;
case USB_SSP_CAP_TYPE:
dev->bos->ssp_cap =
(struct usb_ssp_cap_descriptor *)buffer;
ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
dev->bos->ssp_cap = ssp_cap;
break;
case CONTAINER_ID_TYPE:
dev->bos->ss_id =
(struct usb_ss_container_id_descriptor *)buffer;
break;
case USB_PTM_CAP_TYPE:
dev->bos->ptm_cap =
(struct usb_ptm_cap_descriptor *)buffer;
default:
break;
}
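The check added above is table-driven: each known capability type has a minimum descriptor length, and enumeration of the BOS set stops at the first descriptor shorter than its type's minimum. Below is a stand-alone sketch of that predicate; the type values and sizes mirror the patch and ch9.h (only a subset is shown: EXT = 7, SS = 10, container ID = 20, PTM = 3 bytes):

#include <stdint.h>
#include <stdio.h>

#define USB_CAP_TYPE_EXT    2
#define USB_SS_CAP_TYPE     3
#define CONTAINER_ID_TYPE   4
#define USB_PTM_CAP_TYPE    0xb

static const uint8_t bos_desc_len[256] = {
    [USB_CAP_TYPE_EXT]   = 7,   /* USB_DT_USB_EXT_CAP_SIZE */
    [USB_SS_CAP_TYPE]    = 10,  /* USB_DT_USB_SS_CAP_SIZE */
    [CONTAINER_ID_TYPE]  = 20,  /* USB_DT_USB_SS_CONTN_ID_SIZE */
    [USB_PTM_CAP_TYPE]   = 3,   /* USB_DT_USB_PTM_ID_SIZE */
};

/* Returns 0 when a descriptor claims to be shorter than the minimum for
 * its capability type; unknown types (table entry 0) are not checked. */
static int cap_length_ok(uint8_t cap_type, uint8_t length)
{
    return !(bos_desc_len[cap_type] && length < bos_desc_len[cap_type]);
}

int main(void)
{
    printf("SS cap, 10 bytes: %s\n", cap_length_ok(USB_SS_CAP_TYPE, 10) ? "ok" : "reject");
    printf("SS cap,  4 bytes: %s\n", cap_length_ok(USB_SS_CAP_TYPE, 4) ? "ok" : "reject");
    return 0;
}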

View file

@ -113,42 +113,38 @@ enum snoop_when {
#define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0)
/* Limit on the total amount of memory we can allocate for transfers */
static unsigned usbfs_memory_mb = 16;
static u32 usbfs_memory_mb = 16;
module_param(usbfs_memory_mb, uint, 0644);
MODULE_PARM_DESC(usbfs_memory_mb,
"maximum MB allowed for usbfs buffers (0 = no limit)");
/* Hard limit, necessary to avoid arithmetic overflow */
#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
static atomic_t usbfs_memory_usage; /* Total memory currently allocated */
static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */
/* Check whether it's okay to allocate more memory for a transfer */
static int usbfs_increase_memory_usage(unsigned amount)
static int usbfs_increase_memory_usage(u64 amount)
{
unsigned lim;
u64 lim;
/*
* Convert usbfs_memory_mb to bytes, avoiding overflows.
* 0 means use the hard limit (effectively unlimited).
*/
lim = ACCESS_ONCE(usbfs_memory_mb);
if (lim == 0 || lim > (USBFS_XFER_MAX >> 20))
lim = USBFS_XFER_MAX;
else
lim <<= 20;
lim <<= 20;
atomic_add(amount, &usbfs_memory_usage);
if (atomic_read(&usbfs_memory_usage) <= lim)
return 0;
atomic_sub(amount, &usbfs_memory_usage);
return -ENOMEM;
atomic64_add(amount, &usbfs_memory_usage);
if (lim > 0 && atomic64_read(&usbfs_memory_usage) > lim) {
atomic64_sub(amount, &usbfs_memory_usage);
return -ENOMEM;
}
return 0;
}
/* Memory for a transfer is being deallocated */
static void usbfs_decrease_memory_usage(unsigned amount)
static void usbfs_decrease_memory_usage(u64 amount)
{
atomic_sub(amount, &usbfs_memory_usage);
atomic64_sub(amount, &usbfs_memory_usage);
}
static int connected(struct usb_dev_state *ps)
@ -1077,7 +1073,7 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg)
if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
return -EINVAL;
len1 = bulk.len;
if (len1 >= USBFS_XFER_MAX)
if (len1 >= (INT_MAX - sizeof(struct urb)))
return -EINVAL;
ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
if (ret)
@ -1297,13 +1293,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
int number_of_packets = 0;
unsigned int stream_id = 0;
void *buf;
if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
USBDEVFS_URB_SHORT_NOT_OK |
unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
USBDEVFS_URB_BULK_CONTINUATION |
USBDEVFS_URB_NO_FSBR |
USBDEVFS_URB_ZERO_PACKET |
USBDEVFS_URB_NO_INTERRUPT))
USBDEVFS_URB_NO_INTERRUPT;
/* USBDEVFS_URB_ISO_ASAP is a special case */
if (uurb->type == USBDEVFS_URB_TYPE_ISO)
mask |= USBDEVFS_URB_ISO_ASAP;
if (uurb->flags & ~mask)
return -EINVAL;
if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
return -EINVAL;
if (uurb->buffer_length > 0 && !uurb->buffer)
return -EINVAL;
@ -1424,10 +1426,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
return -EINVAL;
}
if (uurb->buffer_length >= USBFS_XFER_MAX) {
ret = -EINVAL;
goto error;
}
if (uurb->buffer_length > 0 &&
!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
uurb->buffer, uurb->buffer_length)) {
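To see why the accounting above moved to 64-bit types, here is a stand-alone model of the limit check with plain integers standing in for atomic64_t; function and variable names are made up for the example:

#include <stdint.h>
#include <stdio.h>

static uint64_t memory_usage;   /* running total of usbfs buffer memory */

/* With a u64 total and a u64 amount, a huge user-supplied length can no
 * longer wrap the counter past the limit check. A limit of 0 means
 * "no limit", as with the usbfs_memory_mb module parameter. */
static int increase_memory_usage(uint64_t amount, uint64_t limit_mb)
{
    uint64_t lim = limit_mb << 20;  /* MB -> bytes */

    memory_usage += amount;
    if (lim > 0 && memory_usage > lim) {
        memory_usage -= amount;
        return -1;  /* the driver returns -ENOMEM here */
    }
    return 0;
}

int main(void)
{
    /* A 3 GiB request against the default 16 MB limit is rejected instead
     * of overflowing a 32-bit total. */
    printf("%d\n", increase_memory_usage(3ULL << 30, 16));  /* -1 */
    printf("%d\n", increase_memory_usage(1 << 20, 16));     /*  0 */
    return 0;
}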

View file

@ -4858,6 +4858,15 @@ loop:
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
/* When halfway through our retry count, power-cycle the port */
if (i == (SET_CONFIG_TRIES / 2) - 1) {
dev_info(&port_dev->dev, "attempt power cycle\n");
usb_hub_set_port_power(hdev, hub, port1, false);
msleep(2 * hub_power_on_good_delay(hub));
usb_hub_set_port_power(hdev, hub, port1, true);
msleep(hub_power_on_good_delay(hub));
}
}
if (hub->hdev->parent ||
!hcd->driver->port_handed_over ||

View file

@ -151,6 +151,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
/* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
{ USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },

View file

@ -851,7 +851,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
default: /* unknown */
break;
}
temp = (cap >> 8) & 0xff;
offset = (cap >> 8) & 0xff;
}
}
#endif

View file

@ -981,6 +981,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
if (!vdev)
return;
if (vdev->real_port == 0 ||
vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
xhci_dbg(xhci, "Bad vdev->real_port.\n");
goto out;
}
tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* is this a hub device that added a tt_info to the tts list */
@ -994,6 +1000,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
}
}
}
out:
/* we are now at a leaf device */
xhci_free_virt_device(xhci, slot_id);
}

View file

@ -368,7 +368,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
if (IS_ERR(tu->extcon)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
return -ENOMEM;
ret = PTR_ERR(tu->extcon);
goto err_disable_clk;
}
ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);

View file

@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_BG96 0x0296
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@ -1185,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),

View file

@ -111,6 +111,10 @@ static int uas_use_uas_driver(struct usb_interface *intf,
}
}
/* All Seagate disk enclosures have broken ATA pass-through support */
if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
flags |= US_FL_NO_ATA_1X;
usb_stor_adjust_quirks(udev, &flags);
if (flags & US_FL_IGNORE_UAS) {

View file

@ -38,7 +38,6 @@
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
@ -5738,7 +5737,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->server = server;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
get_file(fl->fl_file);
memcpy(&p->fl, fl, sizeof(p->fl));
return p;
out_free_seqid:
@ -5851,7 +5849,6 @@ static void nfs4_lock_release(void *calldata)
nfs_free_seqid(data->arg.lock_seqid);
nfs4_put_lock_state(data->lsp);
put_nfs_open_context(data->ctx);
fput(data->fl.fl_file);
kfree(data);
dprintk("%s: done!\n", __func__);
}

View file

@ -1680,7 +1680,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
break;
case -NFS4ERR_STALE_CLIENTID:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
nfs4_state_clear_reclaim_reboot(clp);
nfs4_state_start_reclaim_reboot(clp);
break;
case -NFS4ERR_EXPIRED:

View file

@ -1166,13 +1166,6 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
}
size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
if (size_change) {
/*
* Here we should wait dio to finish before inode lock
* to avoid a deadlock between ocfs2_setattr() and
* ocfs2_dio_end_io_write()
*/
inode_dio_wait(inode);
status = ocfs2_rw_lock(inode, 1);
if (status < 0) {
mlog_errno(status);
@ -1193,6 +1186,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
if (status)
goto bail_unlock;
inode_dio_wait(inode);
if (i_size_read(inode) >= attr->ia_size) {
if (ocfs2_should_order_data(inode)) {
status = ocfs2_begin_ordered_truncate(inode,

View file

@ -234,12 +234,10 @@ static inline int block_page_mkwrite_return(int err)
{
if (err == 0)
return VM_FAULT_LOCKED;
if (err == -EFAULT)
if (err == -EFAULT || err == -EAGAIN)
return VM_FAULT_NOPAGE;
if (err == -ENOMEM)
return VM_FAULT_OOM;
if (err == -EAGAIN)
return VM_FAULT_RETRY;
/* -ENOSPC, -EDQUOT, -EIO ... */
return VM_FAULT_SIGBUS;
}

View file

@ -330,6 +330,7 @@ struct usb_host_bos {
struct usb_ss_cap_descriptor *ss_cap;
struct usb_ssp_cap_descriptor *ssp_cap;
struct usb_ss_container_id_descriptor *ss_id;
struct usb_ptm_cap_descriptor *ptm_cap;
};
int __usb_get_extra_descriptor(char *buffer, unsigned size,

View file

@ -812,6 +812,8 @@ struct usb_wireless_cap_descriptor { /* Ultra Wide Band */
__u8 bReserved;
} __attribute__((packed));
#define USB_DT_USB_WIRELESS_CAP_SIZE 11
/* USB 2.0 Extension descriptor */
#define USB_CAP_TYPE_EXT 2
@ -895,6 +897,22 @@ struct usb_ssp_cap_descriptor {
#define USB_SSP_SUBLINK_SPEED_LSM (0xff << 16) /* Lanespeed mantissa */
} __attribute__((packed));
/*
* Precision time measurement capability descriptor: advertised by devices and
* hubs that support PTM
*/
#define USB_PTM_CAP_TYPE 0xb
struct usb_ptm_cap_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bDevCapabilityType;
} __attribute__((packed));
/*
* The size of the descriptor for the Sublink Speed Attribute Count
* (SSAC) specified in bmAttributes[4:0].
*/
#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4)
/*-------------------------------------------------------------------------*/
@ -991,6 +1009,7 @@ enum usb3_link_state {
USB3_LPM_U3
};
#define USB_DT_USB_PTM_ID_SIZE 3
/*
* A U1 timeout of 0x0 means the parent hub will reject any transitions to U1.
* 0xff means the parent hub will accept transitions to U1, but will not

View file

@ -4943,7 +4943,7 @@ static void tcp_check_space(struct sock *sk)
if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
/* pairs with tcp_poll() */
smp_mb__after_atomic();
smp_mb();
if (sk->sk_socket &&
test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
tcp_new_space(sk);

View file

@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev)
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
int err;
dev->rtnl_link_ops = &vti6_link_ops;
err = register_netdevice(dev);
if (err < 0)
goto out;
strcpy(t->parms.name, dev->name);
dev->rtnl_link_ops = &vti6_link_ops;
dev_hold(dev);
vti6_tnl_link(ip6n, t);

View file

@ -166,7 +166,7 @@ static const char *const sctp_timer_tbl[] = {
/* Lookup timer debug name. */
const char *sctp_tname(const sctp_subtype_t id)
{
if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX)
if (id.timeout < ARRAY_SIZE(sctp_timer_tbl))
return sctp_timer_tbl[id.timeout];
return "unknown_timer";
}

View file

@ -618,14 +618,12 @@ int tipc_server_start(struct tipc_server *s)
void tipc_server_stop(struct tipc_server *s)
{
struct tipc_conn *con;
int total = 0;
int id;
spin_lock_bh(&s->idr_lock);
for (id = 0; total < s->idr_in_use; id++) {
for (id = 0; s->idr_in_use; id++) {
con = idr_find(&s->conn_idr, id);
if (con) {
total++;
spin_unlock_bh(&s->idr_lock);
tipc_close_conn(con);
spin_lock_bh(&s->idr_lock);

View file

@ -52,6 +52,8 @@ static int __init hash_setup(char *str)
ima_hash_algo = HASH_ALGO_SHA1;
else if (strncmp(str, "md5", 3) == 0)
ima_hash_algo = HASH_ALGO_MD5;
else
return 1;
goto out;
}
@ -61,6 +63,8 @@ static int __init hash_setup(char *str)
break;
}
}
if (i == HASH_ALGO__LAST)
return 1;
out:
hash_setup_done = 1;
return 1;

View file

@ -150,7 +150,7 @@ static int run_dir(const char *d, const char *perf)
snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
d, d, perf, vcnt, v);
return system(cmd);
return system(cmd) ? TEST_FAIL : TEST_OK;
}
int test__attr(void)

View file

@ -351,9 +351,24 @@ static void do_simple_tests(void)
install_invalid(&desc, false);
desc.seg_not_present = 0;
desc.read_exec_only = 0;
desc.seg_32bit = 1;
desc.read_exec_only = 0;
desc.limit = 0xfffff;
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB);
desc.limit_in_pages = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB | AR_G);
desc.read_exec_only = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P | AR_DB | AR_G);
desc.contents = 1;
desc.read_exec_only = 0;
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
desc.read_exec_only = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
desc.limit = 0;
install_invalid(&desc, true);
}

View file

@ -84,9 +84,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
struct kvm_vcpu *vcpu;
vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
vcpu->arch.timer_cpu.armed = false;
WARN_ON(!kvm_timer_should_fire(vcpu));
/*
* If the vcpu is blocked we want to wake it up so that it will see